HomeSort by relevance Sort by last modified time
    Searched refs:gpu (Results 201 - 225 of 554) sorted by null

1 2 3 4 5 6 7 8 9 10 11 >>

  /external/skia/src/gpu/vk/
GrVkCopyPipeline.h 17 static GrVkCopyPipeline* Create(GrVkGpu* gpu,
GrVkRenderPass.h 42 void initSimple(const GrVkGpu* gpu, const GrVkRenderTarget& target);
43 void init(const GrVkGpu* gpu,
48 void init(const GrVkGpu* gpu,
123 void init(const GrVkGpu* gpu,
129 void freeGPUData(const GrVkGpu* gpu) const override;
GrVkRenderTarget.h 81 GrVkRenderTarget(GrVkGpu* gpu,
89 GrVkRenderTarget(GrVkGpu* gpu,
111 void createFramebuffer(GrVkGpu* gpu);
118 GrVkRenderTarget(GrVkGpu* gpu,
127 GrVkRenderTarget(GrVkGpu* gpu,
  /external/skqp/src/gpu/ops/
GrStencilPathOp.cpp 24 state->gpu()->pathRendering()->stencilPath(args, fPath.get());
  /external/skqp/src/gpu/vk/
GrVkCopyPipeline.h 17 static GrVkCopyPipeline* Create(GrVkGpu* gpu,
GrVkRenderPass.h 42 void initSimple(const GrVkGpu* gpu, const GrVkRenderTarget& target);
43 void init(const GrVkGpu* gpu,
48 void init(const GrVkGpu* gpu,
123 void init(const GrVkGpu* gpu,
129 void freeGPUData(const GrVkGpu* gpu) const override;
GrVkRenderTarget.h 81 GrVkRenderTarget(GrVkGpu* gpu,
89 GrVkRenderTarget(GrVkGpu* gpu,
111 void createFramebuffer(GrVkGpu* gpu);
118 GrVkRenderTarget(GrVkGpu* gpu,
127 GrVkRenderTarget(GrVkGpu* gpu,
  /external/tensorflow/tensorflow/compiler/xla/service/gpu/
gpu_layout_assignment.h 24 namespace gpu { namespace in namespace:xla
26 // GPU-specific layout assignment pass which preassigns layouts to satisfy
46 } // namespace gpu
hlo_schedule.h 22 #include "tensorflow/compiler/xla/service/gpu/stream_assignment.h"
28 namespace gpu { namespace in namespace:xla
65 } // namespace gpu
infeed_thunk.h 20 #include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
21 #include "tensorflow/compiler/xla/service/gpu/thunk.h"
27 namespace gpu { namespace in namespace:xla
54 } // namespace gpu
  /external/tensorflow/tensorflow/stream_executor/host/
host_platform.cc 29 namespace gpu = ::perftools::gputools;
96 std::unique_ptr<gpu::Platform> platform(new gpu::host::HostPlatform);
97 SE_CHECK_OK(gpu::MultiPlatformManager::RegisterPlatform(std::move(platform)));
  /external/skia/src/gpu/
GrBufferAllocPool.h 53 * @param gpu The GrGpu used to create the buffers.
59 GrBufferAllocPool(GrGpu* gpu,
157 * @param gpu The GrGpu used to create the vertex buffers.
159 GrVertexBufferAllocPool(GrGpu* gpu);
233 * @param gpu The GrGpu used to create the index buffers.
235 GrIndexBufferAllocPool(GrGpu* gpu);
GrRenderTarget.cpp 21 GrRenderTarget::GrRenderTarget(GrGpu* gpu, const GrSurfaceDesc& desc,
24 : INHERITED(gpu, desc)
31 gpu->caps()->maxWindowRectangles() > 0);
GrGpuResource.cpp 16 static inline GrResourceCache* get_resource_cache(GrGpu* gpu) {
17 SkASSERT(gpu);
18 SkASSERT(gpu->getContext());
19 SkASSERT(gpu->getContext()->contextPriv().getResourceCache());
20 return gpu->getContext()->contextPriv().getResourceCache();
23 GrGpuResource::GrGpuResource(GrGpu* gpu)
25 , fGpu(gpu)
  /external/skia/src/gpu/mock/
GrMockGpuCommandBuffer.h 34 GrMockGpuRTCommandBuffer(GrMockGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin)
36 , fGpu(gpu) {
39 GrGpu* gpu() override { return fGpu; }
  /external/skqp/src/gpu/
GrBufferAllocPool.h 53 * @param gpu The GrGpu used to create the buffers.
59 GrBufferAllocPool(GrGpu* gpu,
157 * @param gpu The GrGpu used to create the vertex buffers.
159 GrVertexBufferAllocPool(GrGpu* gpu);
233 * @param gpu The GrGpu used to create the index buffers.
235 GrIndexBufferAllocPool(GrGpu* gpu);
GrGpuResource.cpp 16 static inline GrResourceCache* get_resource_cache(GrGpu* gpu) {
17 SkASSERT(gpu);
18 SkASSERT(gpu->getContext());
19 SkASSERT(gpu->getContext()->contextPriv().getResourceCache());
20 return gpu->getContext()->contextPriv().getResourceCache();
23 GrGpuResource::GrGpuResource(GrGpu* gpu)
25 , fGpu(gpu)
  /external/skqp/src/gpu/mock/
GrMockGpuCommandBuffer.h 34 GrMockGpuRTCommandBuffer(GrMockGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin)
36 , fGpu(gpu) {
39 GrGpu* gpu() override { return fGpu; }
  /external/tensorflow/tensorflow/core/kernels/
diag_op_test.cc 45 BM_DiagDev(N, int, DT_INT32, gpu); \
46 BM_DiagDev(N, float, DT_FLOAT, gpu); \
47 BM_DiagDev(N, std::complex<float>, DT_COMPLEX64, gpu);
  /external/skia/src/gpu/gl/
GrGLGpuCommandBuffer.h 22 GrGLGpuTextureCommandBuffer(GrGLGpu* gpu, GrTexture* texture, GrSurfaceOrigin origin)
24 , fGpu(gpu) {
49 * are immediately sent to the gpu to execute. Thus all the commands in this class are simply
53 GrGLGpuRTCommandBuffer(GrGLGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin,
57 , fGpu(gpu)
85 GrGpu* gpu() override { return fGpu; }
  /external/skqp/src/gpu/gl/
GrGLGpuCommandBuffer.h 22 GrGLGpuTextureCommandBuffer(GrGLGpu* gpu, GrTexture* texture, GrSurfaceOrigin origin)
24 , fGpu(gpu) {
49 * are immediately sent to the gpu to execute. Thus all the commands in this class are simply
53 GrGLGpuRTCommandBuffer(GrGLGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin,
57 , fGpu(gpu)
85 GrGpu* gpu() override { return fGpu; }
  /external/tensorflow/tensorflow/compiler/jit/kernels/
xla_launch_op.cc 39 namespace gpu = perftools::gputools;
48 XlaAllocator(const gpu::Platform* platform, OpKernelContext* op_context);
50 xla::StatusOr<gpu::DeviceMemoryBase> Allocate(int device_ordinal, uint64 size,
52 Status Deallocate(int device_ordinal, gpu::DeviceMemoryBase* mem) override;
61 Status MakeTensorFromBuffer(gpu::DeviceMemoryBase buffer, DataType dtype,
64 // The Tensorflow BFC allocator used on GPU allows host-side deallocation
65 // before GPU execution takes place. Tensorflow uses the ordering of the main
68 // support for multiple GPU streams or allocators with different ordering
82 XlaAllocator::XlaAllocator(const gpu::Platform* platform,
88 xla::StatusOr<gpu::DeviceMemoryBase> XlaAllocator::Allocate
    [all...]
  /external/skia/tests/
PrimitiveProcessorTest.cpp 8 // This is a GPU-backend specific test. It relies on static intializers to work
115 GrGpu* gpu = context->contextPriv().getGpu(); local
134 REPORTER_ASSERT(reporter, gpu->stats()->numDraws() == 0);
135 REPORTER_ASSERT(reporter, gpu->stats()->numFailedDraws() == 0);
145 REPORTER_ASSERT(reporter, gpu->stats()->numDraws() == 1);
146 REPORTER_ASSERT(reporter, gpu->stats()->numFailedDraws() == 0);
152 REPORTER_ASSERT(reporter, gpu->stats()->numDraws() == 0);
153 REPORTER_ASSERT(reporter, gpu->stats()->numFailedDraws() == 1);
  /external/skqp/tests/
PrimitiveProcessorTest.cpp 8 // This is a GPU-backend specific test. It relies on static intializers to work
115 GrGpu* gpu = context->contextPriv().getGpu(); local
134 REPORTER_ASSERT(reporter, gpu->stats()->numDraws() == 0);
135 REPORTER_ASSERT(reporter, gpu->stats()->numFailedDraws() == 0);
145 REPORTER_ASSERT(reporter, gpu->stats()->numDraws() == 1);
146 REPORTER_ASSERT(reporter, gpu->stats()->numFailedDraws() == 0);
152 REPORTER_ASSERT(reporter, gpu->stats()->numDraws() == 0);
153 REPORTER_ASSERT(reporter, gpu->stats()->numFailedDraws() == 1);
  /external/tensorflow/tensorflow/core/common_runtime/gpu/
gpu_device.h 30 #include "tensorflow/core/common_runtime/gpu/gpu_event_mgr.h"
31 #include "tensorflow/core/common_runtime/gpu/gpu_id.h"
32 #include "tensorflow/core/common_runtime/gpu/gpu_id_manager.h"
33 #include "tensorflow/core/common_runtime/gpu/gpu_id_utils.h"
61 // GPU devices require the Op Compute method to save a reference to
90 // Returns the CUDA GPU id of this device within the native driver system;
91 // e.g., for CUDA this is the ordinal of the GPU within the system.
96 gpu::StreamExecutor* executor() const { return executor_; }
102 gpu::StreamExecutor* executor_; // not owned
106 gpu::Stream* compute = nullptr
    [all...]

Completed in 862 milliseconds

1 2 3 4 5 6 7 8 9 10 11 >>