/external/skqp/gm/
  rectangletexture.cpp
    8    // This test only works with the GPU backend.
    65   GrGpu* gpu = context->contextPriv().getGpu();  [local]
    66   if (!gpu) {
    69   const GrGLContext* glCtx = gpu->glContextForTesting();
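These hits show the usual guard for a GPU-only GM: fetch the GrGpu from the context and bail out when a non-GPU backend is in use. A minimal self-contained sketch of that pattern; ContextLike and GrGpuLike are hypothetical stand-ins, not Skia's types:

    // Hypothetical stand-ins; the real types are GrContext and GrGpu.
    struct GrGpuLike { /* GPU backend handle */ };
    struct ContextLike {
      GrGpuLike* getGpu() { return nullptr; }  // null when no GPU backend is attached
    };

    void onDraw(ContextLike* context) {
      GrGpuLike* gpu = context->getGpu();
      if (!gpu) {
        return;  // skip the test entirely on raster/PDF backends
      }
      // ... GL-specific work via the testing GL context would go here ...
    }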
/external/skqp/src/gpu/gl/
  GrGLProgram.cpp
    28   GrGLProgram::GrGLProgram(GrGLGpu* gpu,
    45   , fGpu(gpu)
    46   , fProgramDataManager(gpu, programID, uniforms, pathProcVaryings)
/external/tensorflow/tensorflow/core/common_runtime/gpu/
  process_state.cc
    16   #include "tensorflow/core/common_runtime/gpu/process_state.h"
    21   #include "tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.h"
    22   #include "tensorflow/core/common_runtime/gpu/gpu_cudamalloc_allocator.h"
    23   #include "tensorflow/core/common_runtime/gpu/gpu_debug_allocator.h"
    24   #include "tensorflow/core/common_runtime/gpu/gpu_id.h"
    25   #include "tensorflow/core/common_runtime/gpu/gpu_id_manager.h"
    26   #include "tensorflow/core/common_runtime/gpu/gpu_id_utils.h"
    27   #include "tensorflow/core/common_runtime/gpu/gpu_init.h"
    28   #include "tensorflow/core/common_runtime/gpu/pool_allocator.h"
    42   // If true, register CPU RAM used to copy to/from GPU RAM with th [all...]
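The truncated comment at line 42 refers to registering (pinning) host memory so GPU copies can use DMA. A sketch of the underlying CUDA runtime mechanism; this illustrates the concept behind the flag, not TensorFlow's actual code path:

    #include <cuda_runtime.h>
    #include <vector>

    int main() {
      std::vector<float> staging(1 << 20);
      // Pin the buffer: the driver locks the pages and maps them for DMA,
      // making cudaMemcpy/cudaMemcpyAsync to and from it much faster.
      cudaHostRegister(staging.data(), staging.size() * sizeof(float),
                       cudaHostRegisterDefault);
      // ... issue cudaMemcpyAsync transfers using staging.data() ...
      cudaHostUnregister(staging.data());
      return 0;
    }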
/external/tensorflow/tensorflow/core/kernels/
  parameterized_truncated_normal_op_test.cc
    134  BM_PTruncatedNormalDev(gpu, 1000, 1000);
    135  BM_PTruncatedNormalDev_2SD(gpu, 10000, 100);
    136  BM_PTruncatedNormalDev_OneTail(gpu, 10000, 100);
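These are macro invocations that stamp out one benchmark per device and problem size. A sketch of the token-pasting pattern behind them; the macro body here is a simplified stand-in, whereas TensorFlow's real macro builds a Graph and runs it through its benchmark harness:

    // Simplified stand-in for the BM_* registration macro.
    #define BM_PTruncatedNormalDev(DEVICE, B, S)                        \
      static void BM_PTruncatedNormal_##DEVICE##_##B##_##S(int iters) { \
        for (int i = 0; i < iters; ++i) {                               \
          /* build and run a B x S truncated-normal sampling op */      \
        }                                                               \
      }

    BM_PTruncatedNormalDev(gpu, 1000, 1000);
    // Expands to a function named BM_PTruncatedNormal_gpu_1000_1000, so
    // each (device, shape) combination gets a uniquely named benchmark.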
/external/eigen/demos/opengl/
  quaternion_demo.cpp
    102  gpu.pushMatrix(GL_MODELVIEW);
    103  gpu.multMatrix(t.matrix(),GL_MODELVIEW);
    105  gpu.popMatrix(GL_MODELVIEW);
    232  gpu.drawVector(Vector3f::Zero(), length*Vector3f::UnitX(), Color(1,0,0,1));
    233  gpu.drawVector(Vector3f::Zero(), length*Vector3f::UnitY(), Color(0,1,0,1));
    234  gpu.drawVector(Vector3f::Zero(), length*Vector3f::UnitZ(), Color(0,0,1,1));
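Here gpu is the demo's global helper around the fixed-function GL matrix stack: save, concatenate the object's transform, draw, restore. A compilable sketch of the same idiom; MatrixStackLike is a hypothetical stand-in for the demo's GpuHelper, with the GL calls stubbed out:

    #include <Eigen/Geometry>

    struct MatrixStackLike {  // hypothetical stand-in for the demo's GpuHelper
      void pushMatrix() {}                        // would call glPushMatrix()
      void multMatrix(const Eigen::Matrix4f&) {}  // would call glMultMatrixf()
      void popMatrix() {}                         // would call glPopMatrix()
    };

    void drawObject(MatrixStackLike& gpu, const Eigen::Affine3f& t) {
      gpu.pushMatrix();            // save the parent's model-view matrix
      gpu.multMatrix(t.matrix());  // apply this object's local transform
      // ... issue the object's draw calls here ...
      gpu.popMatrix();             // restore, so siblings are unaffected
    }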
/external/mesa3d/src/gallium/drivers/vc4/
  vc4_tiling.c
    211  vc4_t_image_helper(void *gpu, uint32_t gpu_stride,
    252  gpu + gpu_offset,
    256  vc4_store_lt_image(gpu + gpu_offset,
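The helper translates between a linear CPU image and the GPU's tiled layout by computing a byte offset for each tile it loads or stores. A generic sketch of that address math, assuming simple raster-order tiles; this is not VC4's exact T-format swizzle, and the tile dimensions are parameters rather than hardware constants:

    #include <cstdint>

    // Byte offset of pixel (x, y) in an image stored as raster-order
    // tiles of tw x th pixels, cpp bytes per pixel.
    uint32_t tiled_offset(uint32_t x, uint32_t y, uint32_t width_px,
                          uint32_t tw, uint32_t th, uint32_t cpp) {
      uint32_t tiles_per_row = (width_px + tw - 1) / tw;
      uint32_t tile_index    = (y / th) * tiles_per_row + (x / tw);
      uint32_t tile_bytes    = tw * th * cpp;
      uint32_t within_tile   = ((y % th) * tw + (x % tw)) * cpp;
      return tile_index * tile_bytes + within_tile;
    }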
/external/skia/src/gpu/ops/
  GrDrawPathOp.cpp
    73   state->gpu()->pathRendering()->drawPath(pipeline, *pathProc, stencil, fPath.get());
    186  state->gpu()->pathRendering()->drawPaths(pipeline,
    214  state->gpu()->pathRendering()->drawPaths(pipeline,
/external/skia/tests/
  GLProgramsTest.cpp
    8    // This is a GPU-backend specific test. It relies on static intializers to work
    354  GrGLGpu* gpu = static_cast<GrGLGpu*>(context->contextPriv().getGpu());  [local]
    356  if (kGLES_GrGLStandard == gpu->glStandard()) {
    361  if (kARM_GrGLVendor != gpu->ctxInfo().vendor()) {
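The test keys its behavior off the GL standard and vendor reported by the driver. A sketch of that gating pattern with stand-in enums; the thresholds below are illustrative, not the test's actual values:

    enum class GLStandard { kGL, kGLES };
    enum class GLVendor   { kARM, kOther };

    // Reduce the number of random program permutations on slower drivers.
    int chooseProgramCount(GLStandard standard, GLVendor vendor) {
      int count = 100;                                // default full coverage
      if (standard == GLStandard::kGLES) count = 25;  // ES shader compilers are slower
      if (vendor == GLVendor::kARM)      count = 10;  // further reduced on Mali
      return count;
    }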
  GrSurfaceTest.cpp
    28   GrGpu* gpu = context->contextPriv().getGpu();  [local]
    55   GrBackendTexture backendTex = gpu->createTestingOnlyBackendTexture(
    70   gpu->deleteTestingOnlyBackendTexture(&backendTex);
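createTestingOnlyBackendTexture and deleteTestingOnlyBackendTexture must be paired, or the test leaks a real GPU resource. A sketch of wrapping that pair in an RAII guard; every type here is a hypothetical stand-in, not Skia's API:

    struct BackendTextureLike { int handle = 0; };

    struct GpuLike {  // hypothetical stand-in for GrGpu
      BackendTextureLike createTestingOnlyBackendTexture() { return {1}; }
      void deleteTestingOnlyBackendTexture(BackendTextureLike* t) { t->handle = 0; }
    };

    class ScopedBackendTexture {
     public:
      explicit ScopedBackendTexture(GpuLike* gpu)
          : fGpu(gpu), fTex(gpu->createTestingOnlyBackendTexture()) {}
      ~ScopedBackendTexture() { fGpu->deleteTestingOnlyBackendTexture(&fTex); }
      BackendTextureLike& get() { return fTex; }
     private:
      GpuLike* fGpu;
      BackendTextureLike fTex;
    };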
/external/skqp/src/gpu/ops/
  GrDrawPathOp.cpp
    73   state->gpu()->pathRendering()->drawPath(pipeline, *pathProc, stencil, fPath.get());
    186  state->gpu()->pathRendering()->drawPaths(pipeline,
    214  state->gpu()->pathRendering()->drawPaths(pipeline,
/external/skqp/tests/
  GLProgramsTest.cpp
    8    // This is a GPU-backend specific test. It relies on static intializers to work
    353  GrGLGpu* gpu = static_cast<GrGLGpu*>(context->contextPriv().getGpu());  [local]
    355  if (kGLES_GrGLStandard == gpu->glStandard()) {
    360  if (kARM_GrGLVendor != gpu->ctxInfo().vendor()) {
  GrSurfaceTest.cpp
    28   GrGpu* gpu = context->contextPriv().getGpu();  [local]
    55   GrBackendTexture backendTex = gpu->createTestingOnlyBackendTexture(
    70   gpu->deleteTestingOnlyBackendTexture(&backendTex);
/cts/tests/tests/graphics/jni/
  VulkanTestHelpers.h
    37   VkPhysicalDevice gpu() { return mGpu; }  [function in class:VkInit]
/external/skia/src/gpu/
  GrOpFlushState.h
    45   GrGpu* gpu() { return fGpu; }  [function in class:GrOpFlushState]
    112  // that share a geometry processor into a Draw is that it allows the Gpu object to setup
/external/skia/tools/flags/
  SkCommonFlags.cpp
    20   DEFINE_bool(gpu, true, "master switch for running GPU-bound work.");
    50   "Test releasing all gpu resources and abandoning the GrContext after running each "
    53   DEFINE_bool(disableDriverCorrectnessWorkarounds, false, "Disables all GPU driver correctness "
    155  DEFINE_int32(gpuThreads, 2, "Create this many extra threads to assist with GPU work, "
    158  DEFINE_bool(cachePathMasks, true, "Allows path mask textures to be cached in GPU configs.");
    163  "Set of enabled gpu path renderers. Defined as a list of: "
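DEFINE_bool and friends come from Skia's SkCommandLineFlags system: each macro declares a global FLAGS_<name> that Parse() fills in from argv. A sketch of defining and consuming such a flag, assuming SkCommandLineFlags.h from a Skia checkout of this vintage:

    #include "SkCommandLineFlags.h"

    // Declares a global `bool FLAGS_gpu`, defaulting to true.
    DEFINE_bool(gpu, true, "master switch for running GPU-bound work.");

    int main(int argc, char** argv) {
      SkCommandLineFlags::Parse(argc, argv);  // e.g. --nogpu flips FLAGS_gpu off
      if (FLAGS_gpu) {
        // ... schedule GPU-bound work ...
      }
      return 0;
    }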
/external/skqp/src/gpu/
  GrOpFlushState.h
    45   GrGpu* gpu() { return fGpu; }  [function in class:GrOpFlushState]
    106  // that share a geometry processor into a Draw is that it allows the Gpu object to setup
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
  convolution_thunk.cc
    16   #include "tensorflow/compiler/xla/service/gpu/convolution_thunk.h"
    20   #include "tensorflow/compiler/xla/service/gpu/cudnn_convolution_runner.h"
    31   namespace gpu {  [namespace in namespace:xla]
    106  }  // namespace gpu
  cudnn_batchnorm_thunk.h
    20   #include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
    21   #include "tensorflow/compiler/xla/service/gpu/thunk.h"
    27   namespace gpu {  [namespace in namespace:xla]
    142  }  // namespace gpu
  gpu_compiler.h
    37   namespace gpu {  [namespace in namespace:xla]
    39   // The GPU compiler generates efficient GPU executables.
    154  }  // namespace gpu
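Every file in this directory follows the same nesting the annotations describe: a gpu namespace inside xla, closed with a matching comment. In skeleton form:

    namespace xla {
    namespace gpu {

    // GPU-specific compiler machinery lives here.

    }  // namespace gpu
    }  // namespace xla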
  gpu_copy_insertion.cc
    16   #include "tensorflow/compiler/xla/service/gpu/gpu_copy_insertion.h"
    24   #include "tensorflow/compiler/xla/service/gpu/ir_emission_utils.h"
    35   namespace gpu {  [namespace in namespace:xla]
    129  // The GPU backend needs additional copies added due to deficiencies in
    137  }  // namespace gpu
  ir_emitter_nested.cc
    19   #include "tensorflow/compiler/xla/service/gpu/ir_emitter_nested.h"
    25   #include "tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.h"
    26   #include "tensorflow/compiler/xla/service/gpu/ir_emitter_context.h"
    35   namespace gpu {  [namespace in namespace:xla]
    124  }  // namespace gpu
  kernel_thunk.cc
    16   #include "tensorflow/compiler/xla/service/gpu/kernel_thunk.h"
    19   #include "tensorflow/compiler/xla/service/gpu/gpu_executable.h"
    29   namespace gpu {  [namespace in namespace:xla]
    103  }  // namespace gpu
  parallel_loop_emitter.cc
    16   #include "tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.h"
    31   namespace gpu {  [namespace in namespace:xla]
    122  }  // namespace gpu
/external/skia/src/gpu/vk/
  GrVkGpuCommandBuffer.cpp
    76   GrVkGpuRTCommandBuffer::GrVkGpuRTCommandBuffer(GrVkGpu* gpu,
    81   , fGpu(gpu)
    142  GrGpu* GrVkGpuRTCommandBuffer::gpu() { return fGpu; }  [function in class:GrVkGpuRTCommandBuffer]
    548  static void set_texture_layout(GrVkTexture* vkTexture, GrVkGpu* gpu) {
    552  vkTexture->setImageLayout(gpu,
    559  static void prepare_sampled_images(const GrResourceIOProcessor& processor, GrVkGpu* gpu) {
    567  gpu->onResolveRenderTarget(texRT);
    573  gpu->generateMipmap(vkTexture, sampler.proxy()->origin());
    577  set_texture_layout(vkTexture, gpu);
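prepare_sampled_images shows the three things a Vulkan backend must do before a texture can be sampled: resolve MSAA render targets, regenerate stale mips, and transition the image layout. A condensed sketch of that flow; the layout constant is real Vulkan, but TextureLike and its methods are hypothetical stand-ins for Skia's types:

    #include <vulkan/vulkan.h>

    struct TextureLike {  // hypothetical stand-in for GrVkTexture
      bool isRenderTarget = false;
      bool mipsAreDirty   = false;
      void resolveMSAA() {}                  // flush multisamples into the image
      void regenerateMipmaps() {}            // rebuild the mip chain
      void setImageLayout(VkImageLayout) {}  // record a layout-transition barrier
    };

    void prepareSampledImage(TextureLike& tex) {
      if (tex.isRenderTarget) tex.resolveMSAA();
      if (tex.mipsAreDirty)   tex.regenerateMipmaps();
      // The image must be in SHADER_READ_ONLY_OPTIMAL before any sampling draw.
      tex.setImageLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
    }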
/external/chromium-trace/catapult/systrace/profile_chrome/
  chrome_tracing_agent.py
    176  chrome_opts.add_option('--trace-gpu', help='Enable extra trace categories '
    177  'for GPU data.', action='store_true')
    203  categories.append('disabled-by-default-gpu.debug*')