/external/eigen/unsupported/Eigen/CXX11/src/ThreadPool/
NonBlockingThreadPool.h
    25  queues_(num_threads),
    55  queues_.push_back(new Queue());
    71  for (size_t i = 0; i < threads_.size(); i++) delete queues_[i];
    79  Queue* q = queues_[pt->thread_id];
    84  Queue* q = queues_[Rand(&pt->rand) % queues_.size()];
   126  MaxSizeVector<Queue*> queues_;  // member in class: Eigen::NonBlockingThreadPoolTempl
   140  Queue* q = queues_[thread_id];
   175  const size_t size = queues_.size();
   180  Task t = queues_[victim]->PopBack()
        [all...]
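The Eigen pool keeps one queue per worker thread and, when a worker's own queue (queues_[pt->thread_id], line 79) runs dry, steals from a randomly chosen victim (lines 84, 175, 180). A minimal sketch of that pattern; Task, Queue, and the PRNG here are stand-ins, not Eigen's actual types:

    #include <cstdint>
    #include <vector>

    struct Task {};

    // Stand-in queue: the owner works one end, thieves take the other.
    struct Queue {
      std::vector<Task> items;
      bool PopBack(Task* t) {
        if (items.empty()) return false;
        *t = items.back();
        items.pop_back();
        return true;
      }
    };

    // Stand-in xorshift PRNG (seed must be nonzero); Eigen uses its own
    // per-thread Rand().
    static uint64_t Rand(uint64_t* state) {
      uint64_t x = *state;
      x ^= x << 13;
      x ^= x >> 7;
      x ^= x << 17;
      return *state = x;
    }

    // A worker whose own queue is empty picks a random victim and tries to
    // steal from the back, probing linearly after the random start so every
    // queue is eventually checked.
    bool Steal(std::vector<Queue*>& queues_, uint64_t* rng, Task* t) {
      const size_t size = queues_.size();
      size_t victim = Rand(rng) % size;
      for (size_t i = 0; i < size; ++i) {
        if (queues_[victim]->PopBack(t)) return true;
        if (++victim >= size) victim = 0;
      }
      return false;  // every queue was empty
    }

Starting at a random victim rather than index 0 spreads contention evenly across the pool instead of hammering the first queue.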
/external/tensorflow/tensorflow/core/kernels/
typed_queue.h
    44  std::vector<SubQueue> queues_ GUARDED_BY(mu_);
    67  queues_.reserve(num_components());
    69  queues_.push_back(SubQueue());
   114  for (const auto& sq : queues_) {
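typed_queue.h shows the layout the TensorFlow queue kernels share: one SubQueue per tuple component, reserved up front and iterated for accounting. A compilable sketch with SubQueue reduced to a std::deque<int> placeholder:

    #include <cstddef>
    #include <deque>
    #include <vector>

    using SubQueue = std::deque<int>;  // placeholder element type

    class TypedQueueSketch {
     public:
      explicit TypedQueueSketch(size_t num_components) {
        queues_.reserve(num_components);            // line 67
        for (size_t i = 0; i < num_components; ++i) {
          queues_.push_back(SubQueue());            // line 69
        }
      }

      // Mirrors the accounting loop at line 114.
      size_t TotalElements() const {
        size_t total = 0;
        for (const auto& sq : queues_) total += sq.size();
        return total;
      }

     private:
      std::vector<SubQueue> queues_;  // GUARDED_BY(mu_) in the real header
    };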
fifo_queue.cc
    42  DCHECK_GT(queues_[0].size(), size_t{0});
    45  (*tuple).push_back(*queues_[i][0].AccessTensor(ctx));
    46  queues_[i].pop_front();
    68  if (queues_[0].size() < static_cast<size_t>(capacity_)) {
    70  queues_[i].push_back(PersistentTensor(tuple[i]));
   127  while (queues_[0].size() < static_cast<size_t>(capacity_)) {
   136  queues_[i].push_back(element);
   168  const int64 queue_size = queues_[0].size();
   260  int64 queue_size = queues_[0].size();
   282  queues_[j].push_front(element)
        [all...]
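The FIFO dequeue path assembles one output tuple by popping the front of every component queue, so queues_[0].size() stands in for the number of complete elements (lines 42-46). A simplified sketch with the tensor type and locking stripped out:

    #include <cassert>
    #include <deque>
    #include <vector>

    using Tensor = int;                 // placeholder for the real tensor type
    using Tuple = std::vector<Tensor>;

    // One complete element is the front entry of every component queue;
    // the caller holds the queue mutex.
    void DequeueLocked(std::vector<std::deque<Tensor>>& queues_, Tuple* tuple) {
      assert(!queues_[0].empty());
      tuple->reserve(queues_.size());
      for (auto& component : queues_) {
        tuple->push_back(component.front());
        component.pop_front();
      }
    }

The push_front at line 282 is the inverse move: restoring already-popped elements when a batched dequeue has to be unwound.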
random_shuffle_queue_op.cc
    64  return queues_[0].size();
    70  // Helper for dequeuing a single random element from queues_.
   111  queues_[i].reserve(min_after_dequeue_);
   117  DCHECK_GT(queues_[0].size(), size_t{0});
   118  int64 index = generator_() % queues_[0].size();
   121  (*tuple).push_back(*queues_[i][index].AccessTensor(ctx));
   122  queues_[i][index] = queues_[i].back();
   123  queues_[i].pop_back();
   145  if (queues_[0].size() < static_cast<size_t>(capacity_))
        [all...]
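Lines 117-123 show the O(1) random-removal trick: pick a random index, return that element, overwrite the hole with the last element, and pop_back, so nothing shifts and order is deliberately destroyed. A generic sketch of the same move:

    #include <cassert>
    #include <random>
    #include <vector>

    // Removes and returns a uniformly random element in O(1) by swapping the
    // chosen slot with the last element before popping. Order is not
    // preserved; that is the point of a shuffle queue.
    template <typename T>
    T RandomTake(std::vector<T>& q, std::mt19937_64& gen) {
      assert(!q.empty());
      size_t index = gen() % q.size();
      T out = q[index];
      q[index] = q.back();
      q.pop_back();
      return out;
    }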
priority_queue.cc
    63  DCHECK_GT(queues_[0].size(), 0);
    66  PersistentTensor persistent_tensor = gtl::ConsumeTop(&queues_[i]).second;
    89  if (queues_[0].size() < static_cast<size_t>(capacity_)) {
    99  queues_[i].emplace(priority, PersistentTensor(tuple[i]));
   155  while (queues_[0].size() < static_cast<size_t>(capacity_)) {
   178  queues_[i].emplace(priority, element);
   211  const int32 s = queues_[0].size();
   303  int32 s = queues_[0].size();
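Here each component queue stores (priority, tensor) pairs and dequeue consumes the top entry (line 66). A sketch that approximates gtl::ConsumeTop with top()+pop() on a std::priority_queue; Value is a placeholder for PersistentTensor:

    #include <cstdint>
    #include <queue>
    #include <utility>

    using Value = int;                        // placeholder for PersistentTensor
    using Entry = std::pair<int64_t, Value>;  // (priority, value); pairs
                                              // compare by priority first

    // Approximates gtl::ConsumeTop(&queues_[i]).second from line 66.
    Value ConsumeTop(std::priority_queue<Entry>* q) {
      Entry top = q->top();  // largest priority wins under the default comparator
      q->pop();
      return top.second;
    }

    // Mirrors queues_[i].emplace(priority, ...) from lines 99 and 178.
    void Enqueue(std::priority_queue<Entry>* q, int64_t priority, Value v) {
      q->emplace(priority, std::move(v));
    }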
fifo_queue.h
    53  return queues_[0].size();
    59  // Helper for dequeuing a single element from queues_.
priority_queue.h
    73  return queues_[0].size();
    79  // Helper for dequeuing a single element from queues_.
padding_fifo_queue.cc
   102  int32 queue_size = queues_[0].size();
   120  queues_[j].push_front(element);
   124  if (allow_small_batch && !queues_[0].empty()) {
   126  queue_size = queues_[0].size();
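Lines 124-126 show the allow_small_batch fallback: when a full batch is not available but the queue is non-empty, the request shrinks to whatever is on hand instead of blocking. A small helper capturing just that size rule (names illustrative; the real op works through callbacks):

    #include <cstdint>

    // Decides how many elements a dequeue-many can take right now.
    int32_t EffectiveBatch(int32_t requested, int32_t queue_size,
                           bool allow_small_batch) {
      if (queue_size >= requested) return requested;  // full batch is ready
      if (allow_small_batch && queue_size > 0) return queue_size;  // take what exists
      return 0;  // nothing to do yet; keep waiting
    }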
/device/google/cuttlefish_common/common/vsoc/lib/
socket_forward_region_view.cpp
    63  (data()->queues_[connection_id].*ReadDirection)
    77  auto& queue_pair = data()->queues_[connection_id];
    87  (data()->queues_[connection_id].*WriteDirection)
    96  (data()->queues_[connection_id].*ReadDirection)
   102  auto& queue_pair = data()->queues_[connection_id];
   145  for (auto&& queue_pair : data()->queues_) {
   176  auto& queue_pair = data()->queues_[connection_id];
   208  return data()->queues_[connection_id].port_;
   219  for (auto&& queue_pair : data()->queues_) {
   255  for (auto&& queue_pair : data()->queues_) {
        [all...]
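The (data()->queues_[connection_id].*ReadDirection) expressions at lines 63, 87, and 96 dispatch through a pointer to member, so one code path serves both directions of a QueuePair. A self-contained sketch of that idiom; the struct layout and names are illustrative, not the actual vsoc types:

    #include <cstddef>

    // Illustrative stand-ins; the real types wrap shared-memory rings.
    struct Queue {
      size_t Read(char* buf, size_t n) { (void)buf; return n; }         // stub
      size_t Write(const char* buf, size_t n) { (void)buf; return n; }  // stub
    };

    struct QueuePair {
      Queue host_to_guest;
      Queue guest_to_host;
    };

    // The member pointer picks which half of the pair this instantiation
    // touches, so send and receive share one code path.
    template <Queue QueuePair::*Direction>
    size_t Recv(QueuePair& pair, char* buf, size_t n) {
      return (pair.*Direction).Read(buf, n);
    }

    // Usage: Recv<&QueuePair::host_to_guest>(pair, buf, n);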
/device/google/cuttlefish_common/common/vsoc/shm/
socket_forward_layout.h
    85  for (auto& i : queues_) {
    93  QueuePair queues_[kNumQueues];  // member in struct: vsoc::layout::socket_forward::SocketForwardLayout
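The layout struct embeds a fixed-size QueuePair array by value, so the whole connection table sits inside the mapped shared-memory region; the loop at line 85 walks every pair. A rough sketch under that assumption (kNumQueues and the pair contents are placeholders):

    #include <cstdint>

    constexpr int kNumQueues = 16;  // illustrative; the real constant differs

    // Placeholder for the real QueuePair, which holds the per-direction rings.
    struct QueuePair {
      uint32_t port_;
    };

    struct SocketForwardLayout {
      // Walks every slot, as the loop at line 85 does; here it just resets.
      void Reset() {
        for (auto& i : queues_) {
          i.port_ = 0;
        }
      }
      QueuePair queues_[kNumQueues];  // embedded, so both sides see the same bytes
    };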
/external/tensorflow/tensorflow/core/kernels/batching_util/
shared_batch_scheduler.h
   187  QueueList queues_ GUARDED_BY(mu_);
   189  // An iterator over 'queues_', pointing to the queue from which the next
   378  if (queues_.empty()) {
   424  queues_.push_back(std::move(internal_queue));
   425  if (next_queue_to_schedule_ == queues_.end()) {
   426  next_queue_to_schedule_ = queues_.begin();
   435  : options_(options), next_queue_to_schedule_(queues_.end()) {
   457  const int num_queues = queues_.size();
   461  DCHECK(next_queue_to_schedule_ != queues_.end());
   480  next_queue_to_schedule_ = queues_.erase(next_queue_to_schedule_)
        [all...]
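The scheduler walks queues_ round-robin: next_queue_to_schedule_ advances through the list, erase() at line 480 hands back the next valid iterator when a dead queue is removed, and the cursor wraps to begin() when it reaches end() (lines 425-426). A sketch of the cursor maintenance; the Queue stub and its closed_and_empty() predicate are hypothetical:

    #include <list>
    #include <memory>

    // Hypothetical stand-in for the scheduler's internal queue type.
    struct Queue {
      bool closed_and_empty() const { return false; }
    };

    using QueueList = std::list<std::unique_ptr<Queue>>;

    // Advances the cursor one step; requires a non-empty list and a valid
    // cursor on entry. erase() returns the iterator after the removed
    // element, which keeps the cursor valid without extra bookkeeping.
    void Advance(QueueList& queues_, QueueList::iterator& next_queue_to_schedule_) {
      if ((*next_queue_to_schedule_)->closed_and_empty()) {
        next_queue_to_schedule_ = queues_.erase(next_queue_to_schedule_);
      } else {
        ++next_queue_to_schedule_;
      }
      if (next_queue_to_schedule_ == queues_.end()) {
        next_queue_to_schedule_ = queues_.begin();  // wrap around
      }
    }

Using std::list keeps every other iterator stable across insertions and erasures, which is what makes the persistent cursor safe.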
/external/vulkan-validation-layers/tests/
vktestbinding.cpp
   243  for (std::vector<Queue *>::iterator it = queues_[i].begin(); it != queues_[i].end(); it++) delete *it;
   244  queues_[i].clear();
   321  queues_[GRAPHICS].push_back(new Queue(queue, i));
   325  queues_[COMPUTE].push_back(new Queue(queue, i));
   329  queues_[DMA].push_back(new Queue(queue, i));
   336  EXPECT(!queues_[GRAPHICS].empty() || !queues_[COMPUTE].empty());
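Lines 321-329 bucket each created queue under GRAPHICS, COMPUTE, or DMA according to its family's capability flags, and lines 243-244 delete and clear every bucket on teardown. A condensed sketch with the Vulkan handle reduced to void*:

    #include <vector>

    enum QueueIndex { GRAPHICS, COMPUTE, DMA, QUEUE_COUNT };

    struct Queue {
      Queue(void*, int) {}  // wraps a queue handle and its family index
    };

    struct DeviceSketch {
      // File each queue under the first capability its family advertises.
      void AddQueue(void* handle, int family, bool graphics, bool compute) {
        if (graphics) {
          queues_[GRAPHICS].push_back(new Queue(handle, family));
        } else if (compute) {
          queues_[COMPUTE].push_back(new Queue(handle, family));
        } else {
          queues_[DMA].push_back(new Queue(handle, family));
        }
      }

      // Teardown: delete and clear every bucket, as at lines 243-244.
      ~DeviceSketch() {
        for (auto& bucket : queues_) {
          for (Queue* q : bucket) delete q;
          bucket.clear();
        }
      }

      std::vector<Queue*> queues_[QUEUE_COUNT];
    };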
vktestbinding.h
   213  const std::vector<Queue *> &graphics_queues() const { return queues_[GRAPHICS]; }
   214  const std::vector<Queue *> &compute_queues() { return queues_[COMPUTE]; }
   215  const std::vector<Queue *> &dma_queues() { return queues_[DMA]; }
   275  std::vector<Queue *> queues_[QUEUE_COUNT];  // member in class: vk_testing::Device
        [all...]
/prebuilts/ndk/r16/sources/third_party/vulkan/src/tests/
vktestbinding.cpp
   241  for (std::vector<Queue *>::iterator it = queues_[i].begin(); it != queues_[i].end(); it++)
   243  queues_[i].clear();
   325  queues_[GRAPHICS].push_back(new Queue(queue, i));
   329  queues_[COMPUTE].push_back(new Queue(queue, i));
   333  queues_[DMA].push_back(new Queue(queue, i));
   340  EXPECT(!queues_[GRAPHICS].empty() || !queues_[COMPUTE].empty());
vktestbinding.h
   159  const std::vector<Queue *> &graphics_queues() const { return queues_[GRAPHICS]; }
   160  const std::vector<Queue *> &compute_queues() { return queues_[COMPUTE]; }
   161  const std::vector<Queue *> &dma_queues() { return queues_[DMA]; }
   218  std::vector<Queue *> queues_[QUEUE_COUNT];  // member in class: vk_testing::Device