    Searched refs: memory_allocator (Results 1 - 25 of 35)

  /external/tensorflow/tensorflow/compiler/xla/service/gpu/
buffer_allocations.h 47 // `memory_allocator` is what this function uses to allocate device memory.
52 DeviceMemoryAllocator* memory_allocator);
62 DeviceMemoryAllocator* memory_allocator() const { return memory_allocator_; } function in class:xla::gpu::BufferAllocations
88 DeviceMemoryAllocator* memory_allocator)
91 memory_allocator_(memory_allocator) {}
buffer_allocations.cc 43 DeviceMemoryAllocator* memory_allocator) {
46 new BufferAllocations(num_buffers, device_ordinal, memory_allocator));
72 TF_ASSIGN_OR_RETURN(buffer_address, memory_allocator->Allocate(
84 "Address returned by memory_allocator->Allocate must be a "
fft_thunk.h 40 DeviceMemoryAllocator* memory_allocator);
fft_thunk.cc 33 int device_ordinal, DeviceMemoryAllocator* memory_allocator)
34 : device_ordinal_(device_ordinal), memory_allocator_(memory_allocator) {}
134 buffer_allocations.memory_allocator());
gpu_executable.cc 254 DeviceMemoryAllocator* memory_allocator = run_options->allocator(); local
281 memory_allocator));
284 !memory_allocator->AllowsAsynchronousDeallocation();
cudnn_convolution_algorithm_picker.cc 37 ScratchAllocator(int device_ordinal, DeviceMemoryAllocator* memory_allocator)
38 : device_ordinal_(device_ordinal), memory_allocator_(memory_allocator) {}
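
  The GPU hits above (buffer_allocations.h/.cc, fft_thunk.cc, cudnn_convolution_algorithm_picker.cc) share one shape: a class is constructed with a device ordinal and a DeviceMemoryAllocator*, allocates device memory through it, and exposes it again via a memory_allocator() getter. A minimal sketch of that shape, using simplified stand-in types rather than the real XLA classes:

    #include <cstddef>
    #include <vector>

    // Hypothetical stand-in for xla::DeviceMemoryAllocator.
    class DeviceMemoryAllocator {
     public:
      virtual ~DeviceMemoryAllocator() = default;
      virtual void* Allocate(int device_ordinal, std::size_t size) = 0;
      virtual void Deallocate(int device_ordinal, void* ptr) = 0;
    };

    class BufferAllocations {
     public:
      BufferAllocations(int num_buffers, int device_ordinal,
                        DeviceMemoryAllocator* memory_allocator)
          : device_ordinal_(device_ordinal),
            memory_allocator_(memory_allocator),
            buffers_(num_buffers, nullptr) {}

      // Allocate one region per buffer through the injected allocator.
      bool AllocateAll(std::size_t bytes_per_buffer) {
        for (void*& buffer : buffers_) {
          buffer = memory_allocator_->Allocate(device_ordinal_, bytes_per_buffer);
          if (buffer == nullptr) return false;  // the real code returns a Status
        }
        return true;
      }

      DeviceMemoryAllocator* memory_allocator() const { return memory_allocator_; }

     private:
      int device_ordinal_;
      DeviceMemoryAllocator* memory_allocator_;
      std::vector<void*> buffers_;
    };
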
  /external/libchrome/base/metrics/
persistent_histogram_allocator_unittest.cc 39 allocator_ = GlobalHistogramAllocator::Get()->memory_allocator();
140 GlobalHistogramAllocator::Get()->memory_allocator()->Name());
146 GlobalHistogramAllocator::Get()->memory_allocator()->Name());
152 GlobalHistogramAllocator::Get()->memory_allocator()->Name());
208 const_cast<void*>(new_allocator->memory_allocator()->data()),
209 new_allocator->memory_allocator()->size(), 0, 0, "", false));
254 const_cast<void*>(new_allocator->memory_allocator()->data()),
255 new_allocator->memory_allocator()->size(), 0, 0, "", false));
persistent_histogram_allocator.cc 255 : allocator_(allocator), memory_iter_(allocator->memory_allocator()) {}
831 PersistentMemoryAllocator* memory_allocator =
832 histogram_allocator->memory_allocator();
    [all...]
persistent_histogram_allocator.h 217 PersistentMemoryAllocator* memory_allocator() { function in class:base::PersistentHistogramAllocator
sparse_histogram_unittest.cc 67 allocator_ = GlobalHistogramAllocator::Get()->memory_allocator();
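
  The libchrome hits reach the underlying PersistentMemoryAllocator through PersistentHistogramAllocator::memory_allocator() and then read Name(), data(), and size(). A self-contained toy version of that access pattern, with hypothetical minimal stand-ins for the Chromium classes:

    #include <cstddef>
    #include <cstdio>
    #include <string>

    class PersistentMemoryAllocator {  // hypothetical minimal stand-in
     public:
      PersistentMemoryAllocator(void* base, std::size_t size, std::string name)
          : base_(base), size_(size), name_(std::move(name)) {}
      const void* data() const { return base_; }
      std::size_t size() const { return size_; }
      const std::string& Name() const { return name_; }

     private:
      void* base_;
      std::size_t size_;
      std::string name_;
    };

    class PersistentHistogramAllocator {  // hypothetical minimal stand-in
     public:
      explicit PersistentHistogramAllocator(PersistentMemoryAllocator* memory)
          : memory_allocator_(memory) {}
      PersistentMemoryAllocator* memory_allocator() { return memory_allocator_; }

     private:
      PersistentMemoryAllocator* memory_allocator_;
    };

    int main() {
      static char region[4096];  // pretend this is a persistent/shared segment
      PersistentMemoryAllocator memory(region, sizeof(region), "TestAllocator");
      PersistentHistogramAllocator histograms(&memory);

      // Same access pattern as the tests: allocator->memory_allocator()->...
      void* base = const_cast<void*>(histograms.memory_allocator()->data());
      std::printf("%s: %zu bytes at %p\n",
                  histograms.memory_allocator()->Name().c_str(),
                  histograms.memory_allocator()->size(), base);
      return 0;
    }
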
  /external/tensorflow/tensorflow/compiler/xla/service/cpu/
cpu_executable.cc 77 DeviceMemoryAllocator* memory_allocator, int device_ordinal,
103 TF_ASSIGN_OR_RETURN((*buffers)[i], memory_allocator->Allocate(
296 DeviceMemoryAllocator* memory_allocator = run_options->allocator(); local
300 memory_allocator, stream->parent()->device_ordinal(), &buffers));
310 TF_RETURN_IF_ERROR(DeallocateTempBuffers(memory_allocator, stream, buffers,
328 DeviceMemoryAllocator* memory_allocator = run_options->allocator(); local
332 memory_allocator, stream->parent()->device_ordinal(), &buffers));
342 buffers_in_result, memory_allocator, stream]() {
348 TF_CHECK_OK(DeallocateTempBuffers(memory_allocator, stream, buffers,
cpu_executable.h 99 DeviceMemoryAllocator* memory_allocator, int device_ordinal,
parallel_cpu_executable.h 99 DeviceMemoryAllocator* memory_allocator, int device_ordinal,
parallel_cpu_executable.cc 327 DeviceMemoryAllocator* memory_allocator, int device_ordinal,
353 TF_ASSIGN_OR_RETURN((*buffers)[i], memory_allocator->Allocate(
461 DeviceMemoryAllocator* memory_allocator = run_options->allocator(); local
469 memory_allocator, stream->parent()->device_ordinal(), &buffers));
508 TF_RETURN_IF_ERROR(memory_allocator->Deallocate(
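
  The CPU executables above follow an allocate/run/deallocate pattern: take the allocator from the run options, allocate one temporary buffer per slot, and afterwards free every temp that is not aliased by the result. A hedged sketch of the two halves of that pattern with stand-in types; the real code obtains the allocator from run_options->allocator() and threads Status through TF_ASSIGN_OR_RETURN / TF_RETURN_IF_ERROR:

    #include <cstddef>
    #include <vector>

    class DeviceMemoryAllocator {  // same hypothetical stand-in as the sketch above
     public:
      virtual ~DeviceMemoryAllocator() = default;
      virtual void* Allocate(int device_ordinal, std::size_t size) = 0;
      virtual void Deallocate(int device_ordinal, void* ptr) = 0;
    };

    bool AllocateBuffers(DeviceMemoryAllocator* memory_allocator,
                         int device_ordinal,
                         const std::vector<std::size_t>& sizes,
                         std::vector<void*>* buffers) {
      buffers->assign(sizes.size(), nullptr);
      for (std::size_t i = 0; i < sizes.size(); ++i) {
        (*buffers)[i] = memory_allocator->Allocate(device_ordinal, sizes[i]);
        if ((*buffers)[i] == nullptr) return false;  // real code propagates a Status
      }
      return true;
    }

    void DeallocateTempBuffers(DeviceMemoryAllocator* memory_allocator,
                               int device_ordinal,
                               std::vector<void*>* buffers,
                               const std::vector<bool>& buffers_in_result) {
      // Buffers aliased by the result stay alive; everything else is returned.
      for (std::size_t i = 0; i < buffers->size(); ++i) {
        if (!buffers_in_result[i] && (*buffers)[i] != nullptr) {
          memory_allocator->Deallocate(device_ordinal, (*buffers)[i]);
          (*buffers)[i] = nullptr;
        }
      }
    }
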
  /external/tensorflow/tensorflow/compiler/jit/
xla_device_context.cc 39 backend_->memory_allocator()
49 TF_CHECK_OK(backend_->memory_allocator()->Deallocate(device_ordinal_, &dmem));
  /external/tensorflow/tensorflow/compiler/xla/service/
shaped_buffer.h 137 DeviceMemoryAllocator* memory_allocator() const { return allocator_; } function in class:xla::ScopedShapedBuffer
backend.h 84 DeviceMemoryAllocator* memory_allocator() const { function in class:xla::Backend
allocation_tracker.cc 178 TF_RETURN_IF_ERROR(backend_->memory_allocator()->Deallocate(
hlo_runner.cc 135 run_options.set_allocator(backend().memory_allocator());
  /external/tensorflow/tensorflow/compiler/xla/client/
local_client.cc 156 run_options.set_allocator(backend_->memory_allocator());
272 allocator = backend().memory_allocator();
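
  local_client.cc and hlo_runner.cc both fall back to the backend's default allocator when the caller did not set one on the run options. A simplified stand-in illustrating that fallback (not XLA's real Backend or ExecutableRunOptions):

    class DeviceMemoryAllocator;  // opaque here

    class Backend {  // hypothetical stand-in
     public:
      explicit Backend(DeviceMemoryAllocator* default_allocator)
          : default_allocator_(default_allocator) {}
      DeviceMemoryAllocator* memory_allocator() const { return default_allocator_; }

     private:
      DeviceMemoryAllocator* default_allocator_;
    };

    class RunOptions {  // hypothetical stand-in
     public:
      void set_allocator(DeviceMemoryAllocator* allocator) { allocator_ = allocator; }
      DeviceMemoryAllocator* allocator() const { return allocator_; }

     private:
      DeviceMemoryAllocator* allocator_ = nullptr;
    };

    DeviceMemoryAllocator* ChooseAllocator(const RunOptions& options,
                                           const Backend& backend) {
      // Same shape as `allocator = backend().memory_allocator();` above.
      return options.allocator() != nullptr ? options.allocator()
                                            : backend.memory_allocator();
    }
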
  /external/v8/src/heap/
spaces.cc 219 if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
230 return isolate_->heap()->memory_allocator()->CommitMemory(start, length,
580 if (!heap()->memory_allocator()->CommitMemory(start, length,
585 CodeRange* code_range = heap_->memory_allocator()->code_range();
591 heap_->memory_allocator()->ZapBlock(start, length);
601 CodeRange* code_range = heap_->memory_allocator()->code_range();
845 heap()->memory_allocator()->ShrinkChunk(this, unused);
    [all...]
heap.cc 226 return static_cast<size_t>(memory_allocator()->SizeExecutable());
286 if (memory_allocator()->MaxAvailable() <= new_space_->Size()) {
332 memory_allocator()->Size() / KB,
333 memory_allocator()->Available() / KB);
    [all...]
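
  The V8 heap hits show the MemoryAllocator being asked to commit a block of memory and then zap it with a filler pattern on debug paths. An illustrative stand-in only; the real CommitMemory goes through the OS page allocator and ZapBlock uses V8's zap value:

    #include <cstddef>
    #include <cstring>

    class MemoryAllocator {  // hypothetical stand-in for v8::internal::MemoryAllocator
     public:
      bool CommitMemory(void* start, std::size_t length) {
        // Real V8 asks the OS to make these pages usable; here the commit
        // is assumed to always succeed.
        (void)start;
        (void)length;
        return true;
      }

      void ZapBlock(void* start, std::size_t length) {
        // Fill the block with a recognizable pattern, similar in spirit to
        // V8's debug zapping of freshly committed memory.
        std::memset(start, 0xDE, length);
      }
    };

    bool GrowSpace(MemoryAllocator* memory_allocator, void* start,
                   std::size_t length) {
      if (!memory_allocator->CommitMemory(start, length)) {
        return false;  // commit failed, the caller reports out-of-memory
      }
      memory_allocator->ZapBlock(start, length);  // debug-only in the real code
      return true;
    }
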
  /external/tensorflow/tensorflow/compiler/xla/python/
local_computation_builder.cc 104 client->backend().memory_allocator());
199 options.set_allocator(client->backend().memory_allocator());
244 options.set_allocator(client->backend().memory_allocator());
  /external/tensorflow/tensorflow/compiler/xla/tests/
xla_hlo_profile_test.cc 127 DeviceMemoryAllocator* allocator = backend->memory_allocator();
159 exec_run_options.set_allocator(backend->memory_allocator());
  /external/v8/src/extensions/
statistics-extension.cc 120 {heap->memory_allocator()->Size(), "total_committed_bytes"},
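
  statistics-extension.cc builds name/value pairs such as {heap->memory_allocator()->Size(), "total_committed_bytes"}. A toy version of that reporting loop, again with stand-in types rather than V8's:

    #include <cstddef>
    #include <cstdio>

    struct MemoryAllocator {  // hypothetical stand-in
      std::size_t committed = 0;
      std::size_t Size() const { return committed; }
    };

    struct Heap {  // hypothetical stand-in
      MemoryAllocator allocator;
      MemoryAllocator* memory_allocator() { return &allocator; }
    };

    int main() {
      Heap heap;
      heap.memory_allocator()->committed = 8 * 1024 * 1024;

      // Each statistic pairs a sampled value with a stable name.
      struct { std::size_t value; const char* name; } stats[] = {
          {heap.memory_allocator()->Size(), "total_committed_bytes"},
      };
      for (const auto& stat : stats) {
        std::printf("%s = %zu\n", stat.name, stat.value);
      }
      return 0;
    }
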
