/external/deqp/external/vulkancts/modules/vulkan/synchronization/
  vktSynchronizationBasicEventTests.cpp
    84:  const VkQueue queue = context.getUniversalQueue();  [local]
    106:  VK_CHECK(vk.queueSubmit(queue, 1u, &submitInfo, DE_NULL));
    107:  VK_CHECK(vk.queueWaitIdle(queue));
    116:  VK_CHECK(vk.queueSubmit(queue, 1u, &submitInfo, DE_NULL));
    117:  VK_CHECK(vk.queueWaitIdle(queue));
    129:  const VkQueue queue = context.getUniversalQueue();  [local]
    158:  VK_CHECK(vk.queueSubmit(queue, 1u, &submitInfo, *fence));
    160:  return tcu::TestStatus::fail("Queue should not end execution");
    166:  return tcu::TestStatus::fail("Queue should end execution");
    176:  const VkQueue queue = context.getUniversalQueue()  [local]
    216:  const VkQueue queue = context.getUniversalQueue();  [local]
    276:  const VkQueue queue = context.getUniversalQueue();  [local]
    [all...]
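
The event tests above follow a recurring Vulkan pattern: build a submit info, hand it to the universal queue, and wait for the work to finish. Below is a minimal sketch of that pattern using the plain Vulkan C API rather than the CTS `vk` dispatch table and `VK_CHECK` macro; the `device`, `queue`, and `cmdBuffer` handles are assumed to exist, and error handling is omitted.

```cpp
#include <vulkan/vulkan.h>

// Sketch only: submit one command buffer and block until the GPU finishes it.
void submitAndWait(VkDevice device, VkQueue queue, VkCommandBuffer cmdBuffer) {
    VkFenceCreateInfo fenceInfo = {};
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;

    VkFence fence = VK_NULL_HANDLE;
    vkCreateFence(device, &fenceInfo, nullptr, &fence);

    VkSubmitInfo submitInfo = {};
    submitInfo.sType              = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers    = &cmdBuffer;

    vkQueueSubmit(queue, 1, &submitInfo, fence);              // cf. lines 106 and 158
    vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);  // or vkQueueWaitIdle(queue)
    vkDestroyFence(device, fence, nullptr);
}
```
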
  vktSynchronizationBasicSemaphoreTests.cpp
    50:  const VkQueue queue = context.getUniversalQueue();  [local]
    92:  VK_CHECK(vk.queueSubmit(queue, 2u, submitInfo, *fence));
    95:  return tcu::TestStatus::fail("Basic semaphore tests with one queue failed");
    97:  return tcu::TestStatus::pass("Basic semaphore tests with one queue passed");
    107:  VkQueue queue;  [member in struct:vkt::synchronization::__anon17952::Queues]
    193:  vk.getDeviceQueue(*logicalDevice, queues[queueReqNdx].queueFamilyIndex, queueReqNdx, &queues[queueReqNdx].queue);
    195:  vk.getDeviceQueue(*logicalDevice, queues[queueReqNdx].queueFamilyIndex, 0u, &queues[queueReqNdx].queue);
    232:  VK_CHECK(vk.queueSubmit(queues[FIRST].queue, 1u, &submitInfo[FIRST], *fence[FIRST]));
    233:  VK_CHECK(vk.queueSubmit(queues[SECOND].queue, 1u, &submitInfo[SECOND], *fence[SECOND]));
    236:  return tcu::TestStatus::fail("Basic semaphore tests with multi queue failed")
    [all...]
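
The semaphore tests pair a signalling submission with a waiting one. A hedged sketch of that pairing with the plain C API is shown below; the handles are assumed to exist, the fence is left out, and the real test additionally drives two separate queues obtained via getDeviceQueue (lines 193 and 195).

```cpp
#include <vulkan/vulkan.h>

// Sketch: batch 0 signals `semaphore`, batch 1 waits on it before executing cmdB.
void submitSignalThenWait(VkQueue queue, VkCommandBuffer cmdA, VkCommandBuffer cmdB,
                          VkSemaphore semaphore) {
    VkPipelineStageFlags waitStage = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    VkSubmitInfo submits[2] = {};

    submits[0].sType                = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submits[0].commandBufferCount   = 1;
    submits[0].pCommandBuffers      = &cmdA;
    submits[0].signalSemaphoreCount = 1;
    submits[0].pSignalSemaphores    = &semaphore;

    submits[1].sType                = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submits[1].waitSemaphoreCount   = 1;
    submits[1].pWaitSemaphores      = &semaphore;
    submits[1].pWaitDstStageMask    = &waitStage;
    submits[1].commandBufferCount   = 1;
    submits[1].pCommandBuffers      = &cmdB;

    // Two batches in a single call, mirroring queueSubmit(queue, 2u, submitInfo, *fence)
    // on line 92 above (no fence here).
    vkQueueSubmit(queue, 2, submits, VK_NULL_HANDLE);
}
```
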
/external/dhcpcd-6.8.2/
  eloop.h
    60:  int queue;  [member in struct:eloop_timeout]
    98:  int eloop_q_timeout_add_sec(struct eloop_ctx *, int queue,
    100:  int eloop_q_timeout_add_tv(struct eloop_ctx *, int queue,
|
/external/guava/guava/src/com/google/common/base/internal/
  Finalizer.java
    63:  * @param queue a reference queue that the thread will poll.
    70:  ReferenceQueue<Object> queue,
    85:  Finalizer finalizer = new Finalizer(finalizableReferenceClass, queue, frqReference);
    104:  private final ReferenceQueue<Object> queue;  [field in class:Finalizer]
    112:  ReferenceQueue<Object> queue,
    114:  this.queue = queue;
    124:  * Loops continuously, pulling references off the queue and cleaning them up.
    131:  if (!cleanUp(queue.remove()))
    [all...]

/external/icu/icu4j/main/classes/core/src/com/ibm/icu/impl/
  ICUNotifier.java
    96:  * Queue a notification on the notification thread for the current
    108:  notifyThread.queue(listeners.toArray(new EventListener[listeners.size()]));
    118:  private final List<EventListener[]> queue = new ArrayList<EventListener[]>();  [field in class:ICUNotifier.NotifyThread]
    125:  * Queue the notification on the thread.
    127:  public void queue(EventListener[] list) {  [method in class:ICUNotifier.NotifyThread]
    129:  queue.add(list);
    144:  while (queue.isEmpty()) {
    147:  list = queue.remove(0);
|
/external/ims/rcs/presencepolling/src/com/android/service/ims/presence/
  CapabilityPolling.java
    549:  PollingsQueue queue = PollingsQueue.getInstance(mContext);  [local]
    550:  if (queue != null) {
    551:  queue.setCapabilityPolling(this);
    552:  queue.add(type, list);
    657:  PollingsQueue queue = PollingsQueue.getInstance(null);  [local]
    658:  if (queue != null) {
    659:  queue.clear();
    [all...]

/external/libnl/lib/netfilter/
  queue_obj.c
    2:  * lib/netfilter/queue_obj.c Netfilter Queue
    14:  * @defgroup queue Queue
    21:  #include <netlink/netfilter/queue.h>
    33:  struct nfnl_queue *queue = (struct nfnl_queue *) a;  [local]
    38:  if (queue->ce_mask & QUEUE_ATTR_GROUP)
    39:  nl_dump(p, "group=%u ", queue->queue_group);
    41:  if (queue->ce_mask & QUEUE_ATTR_MAXLEN)
    42:  nl_dump(p, "maxlen=%u ", queue->queue_maxlen);
    44:  if (queue->ce_mask & QUEUE_ATTR_COPY_MODE
    [all...]

/external/linux-kselftest/tools/testing/selftests/mqueue/
  mq_open_tests.c
    18:  * open a posix message queue and then reports whether or not they
    40:  " path Path name of the message queue to create\n"
    56:  mqd_t queue = -1;  [variable]
    87:  if (queue != -1)
    88:  if (mq_close(queue))
    155:  printf("Current rlimit value for POSIX message queue bytes is "
    166:  printf("Temporarily lowering default queue parameters "
    178:  printf("Temporarily lowering maximum queue parameters "
    182:  "queue parameters to the maximum queue "
    [all...]
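
The selftest above exercises the standard POSIX message-queue calls (mq_open, mq_close, mq_unlink) while probing system limits. The sketch below shows the basic API usage only; the queue name, limits, and permissions are illustrative, and on Linux the program links against -lrt.

```cpp
#include <fcntl.h>     // O_* constants
#include <sys/stat.h>  // mode constants
#include <mqueue.h>
#include <cstdio>

int main() {
    mq_attr attr = {};
    attr.mq_maxmsg  = 10;   // illustrative limits; the selftest instead probes the
    attr.mq_msgsize = 128;  // real /proc/sys/fs/mqueue defaults and RLIMIT_MSGQUEUE

    mqd_t queue = mq_open("/example_queue", O_CREAT | O_RDWR, 0600, &attr);
    if (queue == (mqd_t)-1) {
        perror("mq_open");
        return 1;
    }

    const char msg[] = "hello";
    if (mq_send(queue, msg, sizeof msg, 0 /* priority */) == -1)
        perror("mq_send");

    mq_close(queue);             // cf. the mq_close(queue) check on line 88
    mq_unlink("/example_queue");
    return 0;
}
```
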
/external/mesa3d/src/gallium/drivers/svga/
  svga_state_rss.c
    52:  svga_queue_rs( &queue, SVGA3D_RS_##token, value ); \
    62:  svga_queue_rs( &queue, SVGA3D_RS_##token, value ); \
    87:  struct rs_queue queue;  [local]
    90:  queue.rs_count = 0;
    279:  if (queue.rs_count) {
    284:  queue.rs_count ) != PIPE_OK)
    288:  queue.rs,
    289:  queue.rs_count * sizeof queue.rs[0]);
|
  svga_state_tss.c
    80:  struct bind_queue *queue)
    128:  queue->bind[queue->bind_count].unit = unit;
    129:  queue->bind[queue->bind_count].view = view;
    130:  queue->bind_count++;
    149:  struct bind_queue queue;  [local]
    154:  queue.bind_count = 0;
    162:  &queue);
    175:  &queue);
    232:  struct bind_queue queue;  [local]
    386:  struct ts_queue queue;  [local]
    [all...]

/external/mesa3d/src/gallium/drivers/vc4/
  vc4_qpu_emit.c
    47:  queue(struct qblock *block, uint64_t inst)  [function]
    142:  queue(block,
    186:  queue(block, qpu_a_FMAX(qpu_rb(14), *src0, *src0));
    188:  queue(block, qpu_a_MOV(qpu_rb(14), *src0));
    199:  queue(block, qpu_a_MOV(qpu_ra(14), *src0));
    230:  queue(block, qpu_a_MOV(dst, qpu_r4()));
    232:  queue(block, qpu_a_MOV(qpu_ra(QPU_W_NOP), qpu_r4()));
    427:  queue(block, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
    431:  queue(block, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIPSQRT),
    435:  queue(block, qpu_a_MOV(qpu_rb(QPU_W_SFU_EXP)
    [all...]

/external/okhttp/samples/crawler/src/main/java/com/squareup/okhttp/sample/
  Crawler.java
    45:  private final LinkedBlockingQueue<HttpUrl> queue = new LinkedBlockingQueue<>();  [field in class:Crawler]
    69:  for (HttpUrl url; (url = queue.take()) != null; ) {
    110:  if (link != null) queue.add(link);
    128:  crawler.queue.add(HttpUrl.parse(args[1]));
|
/external/python/cpython2/Modules/
  termios.c
    251:  "tcflush(fd, queue) -> None\n\
    254:  The queue selector specifies which queue: termios.TCIFLUSH for the input\n\
    255:  queue, termios.TCOFLUSH for the output queue, or termios.TCIOFLUSH for\n\
    261:  int fd, queue;  [local]
    264:  fdconv, &fd, &queue))
    266:  if (tcflush(fd, queue) == -1)
|
/external/python/cpython3/Modules/
  termios.c
    244:  "tcflush(fd, queue) -> None\n\
    247:  The queue selector specifies which queue: termios.TCIFLUSH for the input\n\
    248:  queue, termios.TCOFLUSH for the output queue, or termios.TCIOFLUSH for\n\
    254:  int fd, queue;  [local]
    257:  fdconv, &fd, &queue))
    259:  if (tcflush(fd, queue) == -1)
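
Both CPython modules (2 and 3) wrap the same C call; the queue selector picks which direction to discard. A small sketch of the underlying call the module forwards to is shown below; the device path is illustrative.

```cpp
#include <termios.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

int main() {
    int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);  // illustrative device
    if (fd == -1) {
        perror("open");
        return 1;
    }

    // TCIFLUSH drops pending input, TCOFLUSH pending output, TCIOFLUSH both,
    // exactly the selectors documented in the docstring lines above.
    if (tcflush(fd, TCIFLUSH) == -1)
        perror("tcflush");

    close(fd);
    return 0;
}
```
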
|
/external/skia/src/gpu/vk/
  GrVkBackendContext.cpp
    165:  // query to get the initial queue props size
    175:  // now get the actual queue props
    180:  // iterate to find the graphics queue
    194:  // iterate to find the present queue, if needed
    210:  // Just setting this so we end up make a single queue for graphics since there was no
    211:  // request for a present queue.
    254:  // Here we assume no need for swapchain queue
    306:  VkQueue queue;  [local]
    307:  grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
    313:  ctx->fQueue = queue;
    [all...]

/external/skqp/src/gpu/vk/
  GrVkBackendContext.cpp
    165:  // query to get the initial queue props size
    175:  // now get the actual queue props
    180:  // iterate to find the graphics queue
    194:  // iterate to find the present queue, if needed
    210:  // Just setting this so we end up make a single queue for graphics since there was no
    211:  // request for a present queue.
    254:  // Here we assume no need for swapchain queue
    306:  VkQueue queue;  [local]
    307:  grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
    313:  ctx->fQueue = queue;
    [all...]
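
Both Skia trees fetch the graphics queue from an already-created device. A brief sketch of that step with the plain C entry point follows; Skia itself goes through its grVkGetDeviceQueue function pointer, and `device` and `graphicsQueueIndex` are assumed to come from device creation, with at least one queue requested from that family.

```cpp
#include <vulkan/vulkan.h>

// Sketch: fetch queue 0 of the graphics family.
VkQueue getGraphicsQueue(VkDevice device, uint32_t graphicsQueueIndex) {
    VkQueue queue = VK_NULL_HANDLE;
    vkGetDeviceQueue(device, graphicsQueueIndex, /*queueIndex=*/0, &queue);
    return queue;  // stored as ctx->fQueue on line 313 above
}
```
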
/external/squashfs-tools/squashfs-tools/
  caches-queues-lists.c
    39:  struct queue *queue_init(int size)
    41:  struct queue *queue = malloc(sizeof(struct queue));  [local]
    43:  if(queue == NULL)
    50:  queue->data = malloc(sizeof(void *) * (size + 1));
    51:  if(queue->data == NULL)
    54:  queue->size = size + 1;
    55:  queue->readp = queue->writep = 0
    156:  struct seq_queue *queue = malloc(sizeof(struct seq_queue));  [local]
    [all...]
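
The squashfs-tools queue allocates size + 1 slots and tracks separate read and write positions: a classic fixed-capacity ring buffer that keeps one slot empty so "full" and "empty" can be told apart. Below is a hedged C++ sketch of that idea only; the real code adds pthread locking and blocking semantics that are left out here.

```cpp
#include <vector>

// Fixed-capacity ring buffer of void* payloads. One slot stays unused so that
// readp == writep means "empty" and (writep + 1) % size == readp means "full".
struct RingQueue {
    std::vector<void *> data;
    int size;        // allocated slots = requested capacity + 1
    int readp = 0;
    int writep = 0;

    explicit RingQueue(int capacity) : data(capacity + 1), size(capacity + 1) {}

    bool put(void *item) {
        int next = (writep + 1) % size;
        if (next == readp)
            return false;        // full (the real code blocks on a condition variable)
        data[writep] = item;
        writep = next;
        return true;
    }

    void *get() {
        if (readp == writep)
            return nullptr;      // empty (the real code blocks instead)
        void *item = data[readp];
        readp = (readp + 1) % size;
        return item;
    }
};
```
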
/external/tensorflow/tensorflow/core/framework/
  rendezvous.cc
    166:  ItemQueue* queue = &table_[key_hash];  [variable]
    167:  if (queue->empty() || queue->front()->IsSendValue()) {
    169:  // into the queue. The waiter will pick it up when arrives.
    178:  queue->push_back(item);
    184:  Item* item = queue->front();
    185:  queue->pop_front();
    210:  ItemQueue* queue = &table_[key_hash];  [variable]
    211:  if (queue->empty() || !queue->front()->IsSendValue())
    [all...]
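
The rendezvous keeps one queue per key hash and pairs senders with receivers: a send either satisfies a waiter already parked at the front or is itself parked, and a receive does the mirror image. The following is a deliberately simplified, single-threaded sketch of that matching logic; the real TensorFlow implementation uses mutexes, callbacks, and tensor payloads rather than strings.

```cpp
#include <cstdint>
#include <deque>
#include <optional>
#include <string>
#include <unordered_map>

// Toy rendezvous: each key hash owns a queue that holds either parked values
// (from senders) or parked waiters (from receivers), never both at once.
class ToyRendezvous {
 public:
  // Send: if a receiver is parked, it is removed (in the real code its callback
  // would be invoked with the value); otherwise the value is parked.
  void Send(uint64_t key_hash, std::string value) {
    auto &queue = table_[key_hash];
    if (!queue.empty() && !queue.front().is_value) {
      queue.pop_front();
      return;
    }
    queue.push_back({true, std::move(value)});
  }

  // Recv: take a parked value if one exists, otherwise park a waiter entry.
  std::optional<std::string> Recv(uint64_t key_hash) {
    auto &queue = table_[key_hash];
    if (!queue.empty() && queue.front().is_value) {
      std::string value = std::move(queue.front().payload);
      queue.pop_front();
      return value;
    }
    queue.push_back({false, {}});        // no sender yet; wait
    return std::nullopt;
  }

 private:
  struct Item {
    bool is_value;                       // analogue of IsSendValue() above
    std::string payload;
  };
  std::unordered_map<uint64_t, std::deque<Item>> table_;
};
```
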
/external/tensorflow/tensorflow/core/kernels/batching_util/
  adaptive_shared_batch_scheduler_test.cc
    193:  // Queue must have max_enqueued_batchs > 1.
    201:  // Queue must have max_batch_size > 0.
    267:  std::unique_ptr<BatchScheduler<FakeTask>> queue;  [local]
    280:  TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
    286:  TF_ASSERT_OK(ScheduleTask(900 + i, queue.get()));
    335:  std::unique_ptr<BatchScheduler<FakeTask>> queue;  [local]
    348:  TF_ASSERT_OK(scheduler->AddQueue({}, queue_callback, &queue));
    354:  TF_ASSERT_OK(ScheduleTask(900 + i, queue.get()));
    403:  std::unique_ptr<BatchScheduler<FakeTask>> queue;  [local]
    418:  TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_callback, &queue));
    476:  std::unique_ptr<BatchScheduler<FakeTask>> queue;  [local]
    516:  std::unique_ptr<BatchScheduler<FakeTask>> queue;  [local]
    [all...]

/external/tensorflow/tensorflow/core/kernels/
  random_shuffle_queue_op.cc
    293:  // queue that is used internally by the reader and is not
    305:  // queue. As long as we keep the generality that TensorFlow Ops
    310:  // an optimized case where the queue 'knows' what attributes to
    344:  // Restore already-dequeued elements to the queue.
    364:  // Request all remaining elements in the queue.
    372:  // to add elements to the queue.
    449:  "Shared queue '", name_, "' has min_after_dequeue ", min_after_dequeue_,
    460:  "Shared queue '", name_, "' has random seeds (", original_seed_, ", ",
    471:  // Defines a RandomShuffleQueueOp, which produces a Queue (specifically, one
    497:  RandomShuffleQueue* queue = new RandomShuffleQueue  [local]
    [all...]

  reader_ops.cc
    82:  QueueInterface* queue;  [variable]
    84:  GetResourceFromContext(context, "queue_handle", &queue));
    85:  core::ScopedUnref unref_me(queue);
    95:  reader->Read(queue, &key_scalar(), &value_scalar(), context);
    108:  QueueInterface* queue;  [variable]
    115:  GetResourceFromContext(context, "queue_handle", &queue));
    116:  core::ScopedUnref unref_me(queue);
    124:  reader->ReadUpTo(num_records, queue, &keys_vec, &values_vec, context);
|
/external/tensorflow/tensorflow/java/src/test/java/org/tensorflow/
  OperationBuilderTest.java
    195:  Output<?> queue =  [local]
    196:  g.opBuilder("FIFOQueue", "queue")
    201:  assertTrue(hasNode(g, "queue"));
    206:  .addInput(queue)
|
/external/v8/benchmarks/
  richards.js
    51:  var queue = new Packet(null, ID_WORKER, KIND_WORK);
    52:  queue = new Packet(queue, ID_WORKER, KIND_WORK);
    53:  scheduler.addWorkerTask(ID_WORKER, 1000, queue);
    55:  queue = new Packet(null, ID_DEVICE_A, KIND_DEVICE);
    56:  queue = new Packet(queue, ID_DEVICE_A, KIND_DEVICE);
    57:  queue = new Packet(queue, ID_DEVICE_A, KIND_DEVICE);
    58:  scheduler.addHandlerTask(ID_HANDLER_A, 2000, queue);
    241:  Scheduler.prototype.queue = function (packet) {  [method in class:Scheduler]
    [all...]

/external/wayland/tests/
  queue-test.c
    111:  struct wl_event_queue *queue;  [local]
    119:  queue = wl_display_create_queue(state.display);
    120:  assert(queue);
    126:  wl_proxy_set_queue((struct wl_proxy *) callback1, queue);
    131:  wl_proxy_set_queue((struct wl_proxy *) state.callback2, queue);
    136:  ret = wl_display_dispatch_queue(state.display, queue);
    138:  wl_event_queue_destroy(queue);
    155:  /* Test that doing a roundtrip on a queue only the events on that
    156:  * queue get dispatched. */
    160:  struct wl_event_queue *queue;  [local]
    211:  struct wl_event_queue *queue;  [local]
    261:  struct wl_event_queue *queue;  [local]
    [all...]
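
The Wayland test creates a private event queue, reassigns callback proxies to it, and dispatches only that queue so that events on the default queue stay untouched. A hedged sketch of the same client-side sequence follows; the display connection is assumed to exist, and no callback listener is attached, which a real client would normally need.

```cpp
#include <wayland-client.h>

// Sketch: route a sync callback's events to a private queue and dispatch it.
int dispatch_on_private_queue(struct wl_display *display) {
    struct wl_event_queue *queue = wl_display_create_queue(display);
    if (!queue)
        return -1;

    struct wl_callback *callback = wl_display_sync(display);
    wl_proxy_set_queue((struct wl_proxy *) callback, queue);  // cf. line 126 above

    // Dispatch only events destined for `queue`; the default queue is untouched.
    int ret = wl_display_dispatch_queue(display, queue);

    wl_callback_destroy(callback);
    wl_event_queue_destroy(queue);
    return ret;
}
```
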
/frameworks/av/media/libstagefright/rtsp/
  AAMRAssembler.cpp
    103:  List<sp<ABuffer> > *queue = source->queue();  [local]
    105:  if (queue->empty()) {
    110:  List<sp<ABuffer> >::iterator it = queue->begin();
    111:  while (it != queue->end()) {
    116:  it = queue->erase(it);
    119:  if (queue->empty()) {
    124:  sp<ABuffer> buffer = *queue->begin();
    138:  queue->erase(queue->begin())
    [all...]
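
The AMR assembler walks the source's buffer queue, erases stale units, and then consumes the front entry. Below is a generic, hedged sketch of that drain pattern using standard containers in place of Android's List<sp<ABuffer>>; the sequence-number comparison stands in for whatever staleness test the real assembler applies.

```cpp
#include <cstdint>
#include <list>
#include <memory>

struct Buffer {
    uint32_t rtpSeq;  // stand-in for the RTP metadata an ABuffer would carry
};

// Drop queued units older than firstValidSeq, then consume the front one.
// Returns nullptr when nothing is left to consume.
std::shared_ptr<Buffer> drainOne(std::list<std::shared_ptr<Buffer>> &queue,
                                 uint32_t firstValidSeq) {
    for (auto it = queue.begin(); it != queue.end(); ) {
        if ((*it)->rtpSeq < firstValidSeq)
            it = queue.erase(it);   // stale entry, mirrors it = queue->erase(it) above
        else
            ++it;
    }

    if (queue.empty())
        return nullptr;

    std::shared_ptr<Buffer> buffer = queue.front();
    queue.pop_front();              // mirrors queue->erase(queue->begin())
    return buffer;
}
```
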