/external/skia/src/gpu/vk/
  GrVkPipeline.cpp
    425: GrVkPipeline* GrVkPipeline::Create(GrVkGpu* gpu, const GrPipeline& pipeline,
    437: SkASSERT(primProc.numAttribs() <= gpu->vkCaps().maxVertexAttributes());
    451: setup_multisample_state(pipeline, primProc, gpu->caps(), &multisampleInfo);
    459: setup_raster_state(pipeline, gpu->caps(), &rasterInfo);
    488: VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateGraphicsPipelines(gpu->device(),
    500: void GrVkPipeline::freeGPUData(const GrVkGpu* gpu) const {
    501: GR_VK_CALL(gpu->vkInterface(), DestroyPipeline(gpu->device(), fPipeline, nullptr));
    504: void GrVkPipeline::SetDynamicScissorRectState(GrVkGpu* gpu,
    [all...]
  GrVkUtil.h
    55: bool GrCompileVkShaderModule(const GrVkGpu* gpu,
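For context, a minimal sketch of the raw Vulkan entry points behind the GR_VK_CALL(CreateGraphicsPipelines) and GR_VK_CALL(DestroyPipeline) hits above. This is not Skia code; the helper names and the assumption of an already populated VkGraphicsPipelineCreateInfo are hypothetical.

    // Minimal sketch (not Skia code): the raw Vulkan calls wrapped by the
    // GR_VK_CALL hits above. Assumes `device` is a valid VkDevice and `info`
    // is a fully populated VkGraphicsPipelineCreateInfo.
    #include <vulkan/vulkan.h>

    VkPipeline create_pipeline(VkDevice device,
                               const VkGraphicsPipelineCreateInfo& info) {
        VkPipeline pipeline = VK_NULL_HANDLE;
        // One create-info, no pipeline cache, default host allocator.
        VkResult err = vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, &info,
                                                 nullptr, &pipeline);
        return (err == VK_SUCCESS) ? pipeline : VK_NULL_HANDLE;
    }

    void destroy_pipeline(VkDevice device, VkPipeline pipeline) {
        // vkDestroyPipeline accepts VK_NULL_HANDLE, so no null check is needed.
        vkDestroyPipeline(device, pipeline, nullptr);
    }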
/external/skia/tools/flags/
  SkCommonFlags.h
    17: DECLARE_bool(gpu);
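DECLARE_bool(gpu) follows the usual gflags-style split: the flag is defined once with a default and help text, declared wherever it is read, and accessed as FLAGS_gpu. The sketch below only illustrates that general pattern; the help text and run_gpu_configs() are made up, not Skia code.

    // Illustrative only (not Skia code). One .cpp file defines the flag:
    //     DEFINE_bool(gpu, true, "Run GPU-backed configurations.");
    // Any other file declares it and reads the parsed value as FLAGS_gpu:
    DECLARE_bool(gpu);

    void run_gpu_configs() {           // hypothetical consumer
        if (FLAGS_gpu) {
            // ... exercise GPU code paths ...
        }
    }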
/external/skqp/src/gpu/gl/
  GrGLVaryingHandler.cpp
    20: SkASSERT(glPB->gpu()->glCaps().shaderCaps()->pathRenderingSupport() &&
  GrGLPathRendering.h
    34: GrGLPathRendering(GrGLGpu* gpu);
    47: * whether GPU resources should be cleaned up or abandoned when this is called.
    124: GrGLGpu* gpu();
/external/skqp/src/gpu/vk/
  GrVkPipeline.cpp
    425: GrVkPipeline* GrVkPipeline::Create(GrVkGpu* gpu, const GrPipeline& pipeline,
    437: SkASSERT(primProc.numAttribs() <= gpu->vkCaps().maxVertexAttributes());
    451: setup_multisample_state(pipeline, primProc, gpu->caps(), &multisampleInfo);
    459: setup_raster_state(pipeline, gpu->caps(), &rasterInfo);
    488: VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateGraphicsPipelines(gpu->device(),
    500: void GrVkPipeline::freeGPUData(const GrVkGpu* gpu) const {
    501: GR_VK_CALL(gpu->vkInterface(), DestroyPipeline(gpu->device(), fPipeline, nullptr));
    504: void GrVkPipeline::SetDynamicScissorRectState(GrVkGpu* gpu,
    [all...]
  GrVkUtil.h
    55: bool GrCompileVkShaderModule(const GrVkGpu* gpu,
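GrCompileVkShaderModule ultimately has to produce a VkShaderModule from compiled SPIR-V. As a reference point only, a minimal sketch of the underlying vkCreateShaderModule call; this is not Skia's implementation, and make_shader_module plus the spirv buffer are assumptions.

    // Minimal sketch (not Skia's implementation): wrapping SPIR-V words in a
    // VkShaderModule. Assumes `device` is a valid VkDevice and `spirv` holds
    // the compiled code as 32-bit words.
    #include <vulkan/vulkan.h>
    #include <vector>

    VkShaderModule make_shader_module(VkDevice device,
                                      const std::vector<uint32_t>& spirv) {
        VkShaderModuleCreateInfo info = {};
        info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
        info.codeSize = spirv.size() * sizeof(uint32_t);  // size is in bytes
        info.pCode = spirv.data();

        VkShaderModule module = VK_NULL_HANDLE;
        VkResult err = vkCreateShaderModule(device, &info, nullptr, &module);
        return (err == VK_SUCCESS) ? module : VK_NULL_HANDLE;
    }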
/external/skqp/tools/flags/
  SkCommonFlags.h
    17: DECLARE_bool(gpu);
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
  copy_thunk.h
    20: #include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
    21: #include "tensorflow/compiler/xla/service/gpu/thunk.h"
    27: namespace gpu {
    76: } // namespace gpu
  cudnn_convolution_algorithm_picker.h
    20: #include "tensorflow/compiler/xla/service/gpu/cudnn_convolution_runner.h"
    27: namespace gpu {
    59: } // namespace gpu
  for_thunk.cc
    16: #include "tensorflow/compiler/xla/service/gpu/for_thunk.h"
    23: namespace gpu {
    49: } // namespace gpu
  gemm_thunk.h
    20: #include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
    21: #include "tensorflow/compiler/xla/service/gpu/gpu_executable.h"
    22: #include "tensorflow/compiler/xla/service/gpu/thunk.h"
    29: namespace gpu {
    56: // so, we want the GPU to be quiescent during autotuning, so as not to
    85: } // namespace gpu
  ir_emitter_context.h
    21: #include "tensorflow/compiler/xla/service/gpu/partition_assignment.h"
    26: namespace gpu {
    64: } // namespace gpu
  kernel_thunk.h
    24: #include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
    25: #include "tensorflow/compiler/xla/service/gpu/partition_assignment.h"
    26: #include "tensorflow/compiler/xla/service/gpu/thunk.h"
    35: namespace gpu {
    88: } // namespace gpu
  parallel_loop_emitter.h
    20: #include "tensorflow/compiler/xla/service/gpu/partition_assignment.h"
    25: namespace gpu {
    67: } // namespace gpu
  thunk.h
    22: #include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
    28: namespace gpu {
    80: // user's responsibility to wait for all activity on the GPU to finish before
    116: } // namespace gpu
/external/tensorflow/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/
  utils.cc
    16: #include "tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/utils.h"
    39: namespace gpu {
    65: } // namespace gpu
/external/tensorflow/tensorflow/core/common_runtime/gpu/
  gpu_cudamalloc_allocator.cc
    21: #include "tensorflow/core/common_runtime/gpu/gpu_cudamalloc_allocator.h"
    23: #include "tensorflow/core/common_runtime/gpu/gpu_id.h"
    24: #include "tensorflow/core/common_runtime/gpu/gpu_id_utils.h"
    25: #include "tensorflow/core/common_runtime/gpu/gpu_init.h"
    41: gpu::cuda::ScopedActivateExecutorContext scoped_activation{stream_exec_};
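As the file name suggests, this allocator funnels device allocations through the CUDA runtime. A minimal sketch of that underlying cudaMalloc/cudaFree pattern; the CudaMallocAllocator class below is hypothetical, not TensorFlow's implementation.

    // Hypothetical sketch (not TensorFlow's class): routing allocations through
    // the CUDA runtime, which is the pattern gpu_cudamalloc_allocator.cc wraps.
    #include <cuda_runtime.h>
    #include <cstddef>

    class CudaMallocAllocator {
     public:
      void* Allocate(std::size_t num_bytes) {
        void* ptr = nullptr;
        // cudaMalloc hands back device memory; return nullptr on failure.
        if (cudaMalloc(&ptr, num_bytes) != cudaSuccess) {
          return nullptr;
        }
        return ptr;
      }

      void Deallocate(void* ptr) {
        if (ptr != nullptr) {
          cudaFree(ptr);
        }
      }
    };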
/external/tensorflow/tensorflow/tools/ci_build/windows/
  libtensorflow_gpu.sh
    56: zip -j ${DIR}/libtensorflow_jni-gpu-windows-$(uname -m).zip \
    68: zip -j libtensorflow-gpu-windows-$(uname -m).zip \
/device/generic/goldfish/tools/
  emulator_boot_test.sh
    56: emulator -gpu swiftshader_indirect -no-window -show-kernel -verbose -quit-after-boot $time_out \
/external/skia/src/gpu/
  GrPathRendering.h
    84: * Creates a new gpu path, based on the specified path and stroke and returns it.
    89: * @return a new GPU path object.
    94: * Creates a range of gpu paths with a common style.
    123: * @param GrStyle Common style that the GPU will apply to every path. Note that
    125: * descriptor, the GPU style will be applied on top of those
    171: GrPathRendering(GrGpu* gpu) : fGpu(gpu) { }
/external/skia/src/gpu/gl/
  GrGLPathRendering.h
    34: GrGLPathRendering(GrGLGpu* gpu);
    47: * whether GPU resources should be cleaned up or abandoned when this is called.
    124: GrGLGpu* gpu();
/external/skia/src/gpu/ops/
  GrSemaphoreOp.cpp
    33: state->gpu()->insertSemaphore(fSemaphore, fForceFlush);
    58: state->gpu()->waitSemaphore(fSemaphore);
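insertSemaphore/waitSemaphore correspond to signaling a semaphore with one submission and waiting on it in a later one. The sketch below shows only that generic pattern in raw Vulkan terms, not Skia's GrGpu interface; the device, queue, and two recorded command buffers are assumed to exist.

    // Generic Vulkan signal/wait pattern (not Skia's implementation). Assumes
    // valid `device`, `queue`, and recorded command buffers cb_signal / cb_wait.
    #include <vulkan/vulkan.h>

    void submit_with_semaphore(VkDevice device, VkQueue queue,
                               VkCommandBuffer cb_signal, VkCommandBuffer cb_wait) {
        VkSemaphoreCreateInfo sem_info = {};
        sem_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
        VkSemaphore sem = VK_NULL_HANDLE;
        vkCreateSemaphore(device, &sem_info, nullptr, &sem);

        // First submission signals the semaphore when its work completes.
        VkSubmitInfo signal_submit = {};
        signal_submit.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
        signal_submit.commandBufferCount = 1;
        signal_submit.pCommandBuffers = &cb_signal;
        signal_submit.signalSemaphoreCount = 1;
        signal_submit.pSignalSemaphores = &sem;
        vkQueueSubmit(queue, 1, &signal_submit, VK_NULL_HANDLE);

        // Second submission waits on the semaphore before executing.
        VkPipelineStageFlags wait_stage = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
        VkSubmitInfo wait_submit = {};
        wait_submit.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
        wait_submit.waitSemaphoreCount = 1;
        wait_submit.pWaitSemaphores = &sem;
        wait_submit.pWaitDstStageMask = &wait_stage;
        wait_submit.commandBufferCount = 1;
        wait_submit.pCommandBuffers = &cb_wait;
        vkQueueSubmit(queue, 1, &wait_submit, VK_NULL_HANDLE);
        // Destroying `sem` once the queue is idle is omitted for brevity.
    }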
/external/skqp/src/gpu/
  GrPathRendering.h
    84: * Creates a new gpu path, based on the specified path and stroke and returns it.
    89: * @return a new GPU path object.
    94: * Creates a range of gpu paths with a common style.
    123: * @param GrStyle Common style that the GPU will apply to every path. Note that
    125: * descriptor, the GPU style will be applied on top of those
    171: GrPathRendering(GrGpu* gpu) : fGpu(gpu) { }
/external/skqp/src/gpu/ops/
  GrSemaphoreOp.cpp
    33: state->gpu()->insertSemaphore(fSemaphore, fForceFlush);
    58: state->gpu()->waitSemaphore(fSemaphore);