/external/tensorflow/tensorflow/core/common_runtime/gpu/ |
gpu_device.cc | 22 #include "tensorflow/core/common_runtime/gpu/gpu_device.h" 34 #include "tensorflow/core/common_runtime/gpu/gpu_event_mgr.h" 35 #include "tensorflow/core/common_runtime/gpu/gpu_id.h" 36 #include "tensorflow/core/common_runtime/gpu/gpu_id_manager.h" 37 #include "tensorflow/core/common_runtime/gpu/gpu_id_utils.h" 38 #include "tensorflow/core/common_runtime/gpu/gpu_init.h" 39 #include "tensorflow/core/common_runtime/gpu/gpu_stream_util.h" 40 #include "tensorflow/core/common_runtime/gpu/gpu_util.h" 41 #include "tensorflow/core/common_runtime/gpu/process_state.h" 121 strings::StrCat("Ran out of GPU memory when allocating ", num_bytes [all...] |
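The only non-include hit shows the allocator's out-of-memory path assembling its error text with strings::StrCat. A minimal sketch of that style of message construction (the wrapper function and any text after the byte count are assumptions; only the quoted prefix and num_bytes appear in the excerpt):

    #include <string>
    #include "tensorflow/core/lib/strings/strcat.h"

    // Hypothetical helper: builds an OOM message the way the excerpt does.
    std::string GpuOomMessage(size_t num_bytes) {
      return tensorflow::strings::StrCat(
          "Ran out of GPU memory when allocating ", num_bytes,
          " bytes");  // any suffix beyond the byte count is an assumption
    }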
/external/tensorflow/tensorflow/tools/ci_build/windows/gpu/pip/ |
build_tf_windows.sh | 32 # This script is under <repo_root>/tensorflow/tools/ci_build/windows/gpu/pip/ 35 cd ${script_dir%%tensorflow/tools/ci_build/windows/gpu/pip}. 61 # GPU tests are very flaky when running concurrently, so set local_test_jobs=1
|
/external/vulkan-validation-layers/layers/ |
unique_objects.h | 73 VkPhysicalDevice gpu; member in struct:unique_objects::layer_data 82 layer_data() : wsi_enabled(false), gpu(VK_NULL_HANDLE){};
|
/external/skia/gm/ |
imagefromyuvtextures.cpp | 8 // This test only works with the GPU backend. 98 GrGpu* gpu = context->contextPriv().getGpu(); local 99 if (!gpu) { 105 yuvTextures[i] = gpu->createTestingOnlyBackendTexture(fYUVBmps[i].getPixels(), 116 GrGpu* gpu = context->contextPriv().getGpu(); local 117 if (!gpu) { 123 gpu->deleteTestingOnlyBackendTexture(&yuvTextures[i]);
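The GM only runs with the GPU backend: it fetches the GrGpu through the context's private API, skips the test when it is null, creates testing-only backend textures from the three YUV plane bitmaps, and deletes them on teardown. A rough sketch of that guard-and-create flow, as a fragment of the GM (the argument list of createTestingOnlyBackendTexture is truncated in the excerpt and differs between Skia revisions, so the trailing parameters here are assumptions):

    // Sketch of the GM's setup/teardown around the testing-only texture helpers.
    GrGpu* gpu = context->contextPriv().getGpu();
    if (!gpu) {
        return;                       // no GPU backend: the GM silently skips
    }
    for (int i = 0; i < 3; ++i) {     // one plane each for Y, U and V
        // Trailing arguments (dimensions, config, mipmapping) are assumed.
        yuvTextures[i] = gpu->createTestingOnlyBackendTexture(
                fYUVBmps[i].getPixels(), fYUVBmps[i].width(), fYUVBmps[i].height(),
                kAlpha_8_GrPixelConfig, false, GrMipMapped::kNo);
    }
    // ... build an SkImage from the textures and draw it ...
    for (int i = 0; i < 3; ++i) {
        gpu->deleteTestingOnlyBackendTexture(&yuvTextures[i]);
    }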
|
/external/skqp/gm/ |
imagefromyuvtextures.cpp | 8 // This test only works with the GPU backend. 98 GrGpu* gpu = context->contextPriv().getGpu(); local 99 if (!gpu) { 105 yuvTextures[i] = gpu->createTestingOnlyBackendTexture(fYUVBmps[i].getPixels(), 116 GrGpu* gpu = context->contextPriv().getGpu(); local 117 if (!gpu) { 123 gpu->deleteTestingOnlyBackendTexture(&yuvTextures[i]);
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
while_transformer_test.cc | 16 #include "tensorflow/compiler/xla/service/gpu/while_transformer.h" 19 #include "tensorflow/compiler/xla/service/gpu/instruction_fusion.h" 111 EXPECT_TRUE(gpu::GpuInstructionFusion(/*may_duplicate=*/false) 114 EXPECT_TRUE(gpu::GpuInstructionFusion(/*may_duplicate=*/true) 155 auto result = gpu::CanTransformWhileToFor(while_hlo); 175 auto result = gpu::CanTransformWhileToFor(while_hlo); 195 auto result = gpu::CanTransformWhileToFor(while_hlo); 214 auto result = gpu::CanTransformWhileToFor(while_hlo);
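The test drives two steps: it runs gpu::GpuInstructionFusion over the module (once forbidding duplication, once allowing it) and then asks gpu::CanTransformWhileToFor whether the while loop can be rewritten as a counted loop. A condensed sketch of that flow, assuming StatusOr-style return values (module construction and the while_hlo lookup are elided):

    // Condensed sketch; ValueOrDie()/ok() are assumptions about the return types.
    EXPECT_TRUE(gpu::GpuInstructionFusion(/*may_duplicate=*/false)
                    .Run(module.get())
                    .ValueOrDie());
    EXPECT_TRUE(gpu::GpuInstructionFusion(/*may_duplicate=*/true)
                    .Run(module.get())
                    .ValueOrDie());
    auto result = gpu::CanTransformWhileToFor(while_hlo);
    ASSERT_TRUE(result.ok());  // the analysis reports whether the loop has a
                               // recognizable trip count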
|
/external/tensorflow/tensorflow/core/kernels/ |
dynamic_partition_op_test.cc | 207 BM_DYNAMIC_PARTITION(gpu, float, 2); 208 BM_DYNAMIC_PARTITION(gpu, float, 100); 209 BM_DYNAMIC_PARTITION(gpu, double, 2); 210 BM_DYNAMIC_PARTITION(gpu, double, 100); 211 BM_DYNAMIC_PARTITION(gpu, complex64, 2); 212 BM_DYNAMIC_PARTITION(gpu, complex64, 100);
|
random_op_test.cc | 72 BM_RNG(gpu, RandomUniform); 73 BM_RNG(gpu, RandomNormal); 74 BM_RNG(gpu, TruncatedNormal);
|
/external/tensorflow/tensorflow/stream_executor/cuda/ |
cuda_fft.cc | 547 namespace gpu = ::perftools::gputools; 550 gpu::port::Status status = 551 gpu::PluginRegistry::Instance() 552 ->RegisterFactory<gpu::PluginRegistry::FftFactory>( 553 gpu::cuda::kCudaPlatformId, gpu::cuda::kCuFftPlugin, "cuFFT", 554 [](gpu::internal::StreamExecutorInterface 555 *parent) -> gpu::fft::FftSupport * { 556 gpu::cuda::CUDAExecutor *cuda_executor = 557 dynamic_cast<gpu::cuda::CUDAExecutor *>(parent) [all...] |
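The initializer registers a cuFFT factory with the StreamExecutor plugin registry: the factory receives the generic executor interface, dynamic_casts it to the CUDA executor, and returns an FFT support object bound to it. A sketch of that registration reconstructed from the excerpt (the null-check inside the lambda and the CUDAFft class name are assumptions; the rest follows the hit lines):

    namespace gpu = ::perftools::gputools;

    // Sketch of the registration body; error handling is assumed.
    gpu::port::Status status =
        gpu::PluginRegistry::Instance()
            ->RegisterFactory<gpu::PluginRegistry::FftFactory>(
                gpu::cuda::kCudaPlatformId, gpu::cuda::kCuFftPlugin, "cuFFT",
                [](gpu::internal::StreamExecutorInterface *parent)
                    -> gpu::fft::FftSupport * {
                  gpu::cuda::CUDAExecutor *cuda_executor =
                      dynamic_cast<gpu::cuda::CUDAExecutor *>(parent);
                  if (cuda_executor == nullptr) {
                    return nullptr;  // assumed: wrong platform, refuse to build
                  }
                  return new gpu::cuda::CUDAFft(cuda_executor);
                });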
/external/skia/tests/ |
ProxyTest.cpp | 8 // This is a GPU-backend specific test. 190 GrGpu* gpu = ctxInfo.grContext()->contextPriv().getGpu(); local 231 gpu->createTestingOnlyBackendTexture(nullptr, kWidthHeight, 238 gpu->deleteTestingOnlyBackendTexture(&backendTex); 250 gpu->deleteTestingOnlyBackendTexture(&backendTex); 256 gpu->createTestingOnlyBackendTexture(nullptr, kWidthHeight, 264 gpu->deleteTestingOnlyBackendTexture(&backendTex); 276 gpu->deleteTestingOnlyBackendTexture(&backendTex); 283 gpu->createTestingOnlyBackendTexture(nullptr, kWidthHeight, 292 gpu->deleteTestingOnlyBackendTexture(&backendTex) [all...] |
SurfaceTest.cpp | 110 auto* gpu = ctxInfo.grContext()->contextPriv().getGpu(); local 111 GrBackendTexture backendTex = gpu->createTestingOnlyBackendTexture( 128 gpu->deleteTestingOnlyBackendTexture(&backendTex); 139 backendTex = gpu->createTestingOnlyBackendTexture(nullptr, kSize, kSize, colorType, true, 177 gpu->deleteTestingOnlyBackendTexture(&backendTex); 191 auto* gpu = ctxInfo.grContext()->contextPriv().getGpu(); local 192 GrBackendTexture backendTex = gpu->createTestingOnlyBackendTexture( 278 // GPU doesn't support creating unpremul surfaces, so only test opaque + premul 703 GrGpu* gpu = context->contextPriv().getGpu(); local 710 *outTexture = gpu->createTestingOnlyBackendTexture 730 GrGpu* gpu = context->contextPriv().getGpu(); local 797 GrGpu* gpu = context->contextPriv().getGpu(); local 875 GrGpu* gpu = ctxInfo.grContext()->contextPriv().getGpu(); local 898 GrGpu* gpu = ctxInfo.grContext()->contextPriv().getGpu(); local 1041 GrGpu* gpu = context->contextPriv().getGpu(); local [all...] |
TransferPixelsTest.cpp | 8 // This is a GPU-backend specific test. It relies on static initializers to work 72 GrGpu* gpu = context->contextPriv().getGpu(); local 126 result = gpu->transferPixels(tex.get(), 0, 0, kTextureWidth, kTextureHeight, colorType, 131 result = gpu->readPixels(tex.get(), origin, 0, 0, kTextureWidth, kTextureHeight, colorType, 158 result = gpu->transferPixels(tex.get(), kLeft, kTop, kWidth, kHeight, colorType, 163 result = gpu->readPixels(tex.get(), origin, 0, 0, kTextureWidth, kTextureHeight, colorType,
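The test uploads through the GrGpu's transfer-buffer path and then reads the pixels straight back to confirm the upload landed where it should. A trimmed sketch of that upload-then-verify pattern (both calls are truncated in the excerpt, so the buffer/offset/rowBytes and destination arguments are assumptions; the comparison of readback data is elided):

    // Sketch: upload through transferPixels, then read back and verify.
    bool ok = gpu->transferPixels(tex.get(), kLeft, kTop, kWidth, kHeight, colorType,
                                  transferBuffer.get(), offset, rowBytes);
    REPORTER_ASSERT(reporter, ok);
    ok = gpu->readPixels(tex.get(), origin, 0, 0, kTextureWidth, kTextureHeight, colorType,
                         readbackPixels.data(), kTextureWidth * sizeof(uint32_t));
    REPORTER_ASSERT(reporter, ok);
    // ... compare readbackPixels against the data placed in the transfer buffer ...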
|
/external/libdrm/tests/etnaviv/ |
etnaviv_2d_test.c | 161 struct etna_gpu *gpu; local 190 gpu = etna_gpu_new(dev, 0); 191 if (!gpu) { 196 pipe = etna_pipe_new(gpu, ETNA_PIPE_2D); 231 etna_gpu_del(gpu);
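The test's setup follows the usual etnaviv bring-up: create a GPU handle for core 0 on an etna_device, bail out if the core is missing, open a 2D pipe on it, and delete the GPU handle when done. A minimal sketch of that sequence (etna_device_new and the error paths are assumptions; etna_gpu_new, etna_pipe_new and etna_gpu_del appear in the excerpts here and in etnaviv_drmif.h below):

    #include <stdio.h>
    #include "etnaviv_drmif.h"

    /* Minimal sketch of the bring-up, assuming an already-opened DRM fd. */
    static int open_2d_pipe(int fd, struct etna_gpu **out_gpu, struct etna_pipe **out_pipe)
    {
        struct etna_device *dev = etna_device_new(fd);  /* assumed helper */
        struct etna_gpu *gpu = etna_gpu_new(dev, 0);    /* GPU core 0 */
        if (!gpu) {
            fprintf(stderr, "no etnaviv GPU core 0\n");
            return -1;
        }
        *out_pipe = etna_pipe_new(gpu, ETNA_PIPE_2D);   /* 2D pipe on that core */
        if (!*out_pipe) {
            etna_gpu_del(gpu);
            return -1;
        }
        *out_gpu = gpu;
        return 0;
    }
    /* ... after submitting 2D work: etna_gpu_del(gpu); as at line 231 above ... */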
|
/external/skia/src/gpu/vk/ |
GrVkPipelineStateBuilder.cpp | 19 GrVkGpu* gpu, 28 GrVkPipelineStateBuilder builder(gpu, pipeline, primProc, desc); 38 GrVkPipelineStateBuilder::GrVkPipelineStateBuilder(GrVkGpu* gpu, 43 , fGpu(gpu) 146 settings.fSharpenTextures = this->gpu()->getContext()->contextPriv().sharpenMipmappedTextures();
|
GrVkCopyPipeline.cpp | 29 GrVkCopyPipeline* GrVkCopyPipeline::Create(GrVkGpu* gpu, 177 VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateGraphicsPipelines(gpu->device(),
|
GrVkGpuCommandBuffer.h | 26 GrVkGpuTextureCommandBuffer(GrVkGpu* gpu, GrTexture* texture, GrSurfaceOrigin origin) 28 , fGpu(gpu) { 82 GrGpu* gpu() override; 162 // The PreDrawUploads and PreCopies are sent to the GPU before submitting the secondary
|
/external/libdrm/etnaviv/ |
etnaviv_drmif.h | 92 /* gpu functions: 96 void etna_gpu_del(struct etna_gpu *gpu); 97 int etna_gpu_get_param(struct etna_gpu *gpu, enum etna_param_id param, 104 struct etna_pipe *etna_pipe_new(struct etna_gpu *gpu, enum etna_pipe_id id);
|
/external/skia/src/gpu/ |
GrTextureAdjuster.cpp | 85 GrGpu* gpu = fContext->contextPriv().getGpu(); local 86 if (!gpu) { 90 if (!gpu->isACopyNeededForTextureParams(proxy.get(), params, ©Params, scaleAdjust)) {
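GrTextureAdjuster (and GrTextureMaker below) share the same early-out: grab the GrGpu, give up when it is missing, and only make a copy when isACopyNeededForTextureParams says the sampler params cannot be satisfied by the existing texture, for example repeat tiling of a non-power-of-two texture on hardware without full NPOT support. A sketch of that check, following the excerpt (copyParams and scaleAdjust are out-parameters owned by the calling function; the return values here are assumptions):

    // Sketch of the shared "is a copy needed?" early-out.
    GrGpu* gpu = fContext->contextPriv().getGpu();
    if (!gpu) {
        return nullptr;   // assumed: no GPU, nothing can be produced
    }
    if (!gpu->isACopyNeededForTextureParams(proxy.get(), params, &copyParams, scaleAdjust)) {
        return proxy;     // the original texture already satisfies the sampler params
    }
    // ... otherwise make the resized/retiled copy described by copyParams ...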
|
GrTextureMaker.cpp | 31 GrGpu* gpu = fContext->contextPriv().getGpu(); local 35 if (!gpu->isACopyNeededForTextureParams(original.get(), params, ©Params, scaleAdjust)) { 39 if (!gpu->isACopyNeededForTextureParams(this->width(), this->height(),
|
/external/skqp/src/gpu/ |
GrResourceProvider.h | 42 GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* owner); 185 * creating a buffer to guarantee it resides in GPU memory. 279 GrGpu* gpu() { return fGpu; } function in class:GrResourceProvider 280 const GrGpu* gpu() const { return fGpu; } function in class:GrResourceProvider
|
GrTextureMaker.cpp | 31 GrGpu* gpu = fContext->contextPriv().getGpu(); local 35 if (!gpu->isACopyNeededForTextureParams(original.get(), params, ©Params, scaleAdjust)) { 39 if (!gpu->isACopyNeededForTextureParams(this->width(), this->height(),
|
/external/skqp/src/gpu/vk/ |
GrVkCopyPipeline.cpp | 29 GrVkCopyPipeline* GrVkCopyPipeline::Create(GrVkGpu* gpu, 177 VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateGraphicsPipelines(gpu->device(),
|
GrVkGpuCommandBuffer.h | 26 GrVkGpuTextureCommandBuffer(GrVkGpu* gpu, GrTexture* texture, GrSurfaceOrigin origin) 28 , fGpu(gpu) { 82 GrGpu* gpu() override; 174 // The PreDrawUploads and PreCopies are sent to the GPU before submitting the secondary
|
/external/tensorflow/tensorflow/python/eager/ |
benchmarks_test.py | 20 To run GPU benchmarks: 50 GPU = "/device:GPU:0" 87 # call func to maybe warm up the GPU 105 if device == GPU: 106 # Warmup the GPU 131 self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, GPU) 138 GPU) 141 # int32's are kept on host memory even when executing on GPU. 144 self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, GPU) [all...] |
/external/libchrome/base/threading/ |
thread_restrictions.h | 50 namespace gpu { namespace 217 friend class gpu::GpuChannelHost; // http://crbug.com/125264
|