    Searched refs:allocators (Results 1 - 10 of 10)

  /external/skia/tests/
GrAllocatorTest.cpp 73 // Test combinations of allocators with and without stack storage and with different block
75 SkTArray<GrTAllocator<C>*> allocators; local
77 allocators.push_back(&a1);
79 allocators.push_back(&a2);
81 allocators.push_back(&a5);
84 allocators.push_back(&a1);
86 allocators.push_back(&sa3);
88 allocators.push_back(&sa4);
90 for (int i = 0; i < allocators.count(); ++i) {
91 check_allocator(allocators[i], 0, 0, reporter)
    [all...]
  /external/skqp/tests/
GrAllocatorTest.cpp 73 // Test combinations of allocators with and without stack storage and with different block
75 SkTArray<GrTAllocator<C>*> allocators; local
77 allocators.push_back(&a1);
79 allocators.push_back(&a2);
81 allocators.push_back(&a5);
84 allocators.push_back(&a1);
86 allocators.push_back(&sa3);
88 allocators.push_back(&sa4);
90 for (int i = 0; i < allocators.count(); ++i) {
91 check_allocator(allocators[i], 0, 0, reporter)
    [all...]
  /device/google/cuttlefish_common/host/commands/launch/
vsoc_shared_memory.cc 109 const std::vector<VSoCRegionAllocator>& allocators,
118 header->region_count = allocators.size();
121 for (size_t idx = 0; idx < allocators.size(); ++idx) {
122 region_idx_by_name[allocators[idx].region_layout->region_name()] = idx;
128 for (size_t idx = 0; idx < allocators.size(); ++idx) {
130 const auto& region = *allocators[idx].region_layout;
131 WriteRegionDescription(shmem_region_desc, allocators[idx]);
166 std::vector<VSoCRegionAllocator> allocators; local
171 allocators.emplace_back(*layout, file_size /* offset */,
175 file_size += allocators.back().region_size
    [all...]
  /external/libcxx/test/support/
debug_mode_helper.h 372 CHECKPOINT("testing swap with non-equal allocators");
  /external/python/cpython3/Modules/
_tracemalloc.c 30 } allocators; variable in typeref:struct:__anon37322
297 return allocators.raw.malloc(allocators.raw.ctx, size);
303 allocators.raw.free(allocators.raw.ctx, ptr);
968 PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &allocators.raw);
1103 alloc.ctx = &allocators.raw;
1104 PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &allocators.raw);
1113 alloc.ctx = &allocators.mem;
1114 PyMem_GetAllocator(PYMEM_DOMAIN_MEM, &allocators.mem)
    [all...]
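
The _tracemalloc.c hits above show CPython's allocator-hooking API: PyMem_GetAllocator() copies the current allocator for a domain into a PyMemAllocatorEx struct, and a replacement installed with PyMem_SetAllocator() can forward every call to the saved one through its ctx pointer. Below is a minimal sketch of that pattern, not the actual _tracemalloc code; the counting hook and its function names are illustrative.

// Sketch of the PyMem hook pattern (Python 3.5+ PyMemAllocatorEx API).
#include <Python.h>
#include <cstdio>

static PyMemAllocatorEx saved_raw;  // previously installed PYMEM_DOMAIN_RAW allocator

static void *counting_malloc(void *ctx, size_t size) {
    // ctx points at saved_raw, just as _tracemalloc passes &allocators.raw above.
    PyMemAllocatorEx *prev = static_cast<PyMemAllocatorEx *>(ctx);
    std::fprintf(stderr, "raw malloc(%zu)\n", size);
    return prev->malloc(prev->ctx, size);
}

static void *counting_calloc(void *ctx, size_t nelem, size_t elsize) {
    PyMemAllocatorEx *prev = static_cast<PyMemAllocatorEx *>(ctx);
    return prev->calloc(prev->ctx, nelem, elsize);
}

static void *counting_realloc(void *ctx, void *ptr, size_t new_size) {
    PyMemAllocatorEx *prev = static_cast<PyMemAllocatorEx *>(ctx);
    return prev->realloc(prev->ctx, ptr, new_size);
}

static void counting_free(void *ctx, void *ptr) {
    PyMemAllocatorEx *prev = static_cast<PyMemAllocatorEx *>(ctx);
    prev->free(prev->ctx, ptr);
}

void install_counting_hook(void) {
    PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &saved_raw);  // remember the old allocator
    PyMemAllocatorEx alloc;
    alloc.ctx = &saved_raw;          // the wrapper forwards through this context
    alloc.malloc = counting_malloc;
    alloc.calloc = counting_calloc;
    alloc.realloc = counting_realloc;
    alloc.free = counting_free;
    PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);      // the struct is copied, so a local is fine
}

void remove_counting_hook(void) {
    PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &saved_raw);  // restore the original allocator
}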
  /bionic/libc/bionic/
bionic_allocator.cpp 257 BionicSmallObjectAllocator* allocators = local
262 new (allocators + i) BionicSmallObjectAllocator(type, 1 << type);
265 allocators_ = allocators;
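
The bionic_allocator.cpp hit constructs one BionicSmallObjectAllocator per power-of-two size class with placement new inside a single raw block. A minimal sketch of that construct-in-place pattern, using a hypothetical SmallObjectAllocator in place of the bionic type:

#include <cstdlib>
#include <new>

// Hypothetical stand-in for BionicSmallObjectAllocator: it has no default
// constructor, so the array elements must be constructed in place.
struct SmallObjectAllocator {
    SmallObjectAllocator(int type, size_t block_size)
        : type_(type), block_size_(block_size) {}
    int type_;
    size_t block_size_;
};

SmallObjectAllocator* create_allocators(int first_type, int count) {
    // Reserve raw storage for the whole array...
    auto* allocators = static_cast<SmallObjectAllocator*>(
        std::malloc(sizeof(SmallObjectAllocator) * count));
    // ...then construct each element in place, mirroring
    // `new (allocators + i) BionicSmallObjectAllocator(type, 1 << type)` above.
    for (int i = 0; i < count; ++i) {
        const int type = first_type + i;
        new (allocators + i) SmallObjectAllocator(type, size_t{1} << type);
    }
    return allocators;
}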
  /external/libxcam/wrapper/gstreamer/
gstxcambufferpool.cpp 39 #include <gst/allocators/gstdmabuf.h>
gstxcamfilter.cpp 25 #include <gst/allocators/gstdmabuf.h>
    [all...]
  /external/tensorflow/tensorflow/contrib/verbs/
README.md 30 3. Following HKUST research on the use of GPUDirect, and their [GDR implementation](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/gdr/README.md), there is a smart way to benefit from the TensorFlow allocation scheme, which is mostly pool based, i.e. allocators pre-allocate a large memory block and allocate the tensors from there. By attaching a custom Visitor to the relevant allocators, we can do a single registration of the entire memory block, which eliminates the registration overhead. Once the block is registered, each newly allocated tensor will be at a registered address, which allows us to do direct RDMA writes to it.
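
The README paragraph above describes a pool-allocation scheme where a visitor attached to an allocator is invoked once per acquired block, so the whole block can be registered with the RDMA NIC up front instead of registering every tensor. TensorFlow's real visitor and allocator interfaces are not shown in this result, so the sketch below uses a hypothetical PoolAllocator with a std::function visitor purely to illustrate the idea.

#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <utility>
#include <vector>

// Hypothetical pool allocator: it grabs large blocks up front, carves
// allocations out of them, and calls each registered visitor once per block
// (for example a visitor that registers the block for RDMA), so every
// returned address already lies inside a registered region. Alignment is
// ignored to keep the sketch short.
class PoolAllocator {
 public:
  using AllocVisitor = std::function<void(void* block, size_t bytes)>;

  explicit PoolAllocator(size_t block_bytes) : block_bytes_(block_bytes) {}

  void AddAllocVisitor(AllocVisitor v) { visitors_.push_back(std::move(v)); }

  void* Allocate(size_t bytes) {
    assert(bytes <= block_bytes_);           // sketch: no oversized allocations
    if (bytes > remaining_) AcquireBlock();  // registration happens once per block
    void* p = cursor_;
    cursor_ = static_cast<char*>(cursor_) + bytes;
    remaining_ -= bytes;
    return p;                                // address is inside a registered block
  }

 private:
  void AcquireBlock() {
    void* block = std::malloc(block_bytes_);
    for (auto& v : visitors_) v(block, block_bytes_);  // e.g. register with the NIC
    blocks_.push_back(block);
    cursor_ = block;
    remaining_ = block_bytes_;
  }

  size_t block_bytes_;
  size_t remaining_ = 0;
  void* cursor_ = nullptr;
  std::vector<void*> blocks_;
  std::vector<AllocVisitor> visitors_;
};

A single visitor that calls the RDMA registration routine (for example ibv_reg_mr) on each new block is then enough to make every tensor address remotely writable with no per-allocation registration cost.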
  /cts/tests/tests/graphics/src/android/graphics/cts/
ImageDecoderTest.java 335 private static final int[] ALLOCATORS = new int[] {
349 for (int allocator : ALLOCATORS) {
383 for (int allocator : ALLOCATORS) {
1083 int allocators[] = new int[] { ImageDecoder.ALLOCATOR_DEFAULT, local
1797 int[] allocators = { ImageDecoder.ALLOCATOR_HARDWARE, ImageDecoder.ALLOCATOR_DEFAULT }; local
    [all...]

Completed in 572 milliseconds