
Lines Matching defs:queue

161 std::unique_ptr<BatchScheduler<FakeTask>> queue;
162 TF_ASSERT_OK(scheduler->AddQueue(queue_options, callback, &queue));
165 TF_ASSERT_OK(ScheduleTask(3, queue.get()));
166 TF_ASSERT_OK(ScheduleTask(5, queue.get()));
169 TF_ASSERT_OK(ScheduleTask(3 /* (3+5) + 3 > 10 */, queue.get()));
170 TF_ASSERT_OK(ScheduleTask(1, queue.get()));
171 TF_ASSERT_OK(ScheduleTask(6, queue.get()));
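
The matches at 161-171 come from a test that fills a batch up to its size limit. A minimal sketch of the pattern they exercise, assuming the SharedBatchScheduler API declared in shared_batch_scheduler.h and the test's own FakeTask/ScheduleTask/callback fixtures; the option values here are illustrative, not the test's literal settings:

#include "tensorflow/core/kernels/batching_util/shared_batch_scheduler.h"

// Assumed fixtures from the test file: FakeTask is a BatchTask whose size() is
// fixed at construction, ScheduleTask(size, queue) wraps queue->Schedule() for
// a single FakeTask, and 'callback' is the batch-processing function under test.
std::shared_ptr<SharedBatchScheduler<FakeTask>> scheduler;
SharedBatchScheduler<FakeTask>::Options options;
options.num_batch_threads = 1;
TF_ASSERT_OK(SharedBatchScheduler<FakeTask>::Create(options, &scheduler));

SharedBatchScheduler<FakeTask>::QueueOptions queue_options;
queue_options.max_batch_size = 10;  // the /* (3+5) + 3 > 10 */ comment refers to this limit

std::unique_ptr<BatchScheduler<FakeTask>> queue;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, callback, &queue));

TF_ASSERT_OK(ScheduleTask(3, queue.get()));  // open batch holds 3/10
TF_ASSERT_OK(ScheduleTask(5, queue.get()));  // open batch holds 8/10
TF_ASSERT_OK(ScheduleTask(3, queue.get()));  // 8 + 3 > 10, so this opens a second batch
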
223 std::unique_ptr<BatchScheduler<FakeTask>> queue;
224 TF_ASSERT_OK(scheduler->AddQueue(queue_options, callback, &queue));
228 TF_ASSERT_OK(ScheduleTask(1, queue.get()));
238 TF_ASSERT_OK(ScheduleTask(2, queue.get()));
241 TF_ASSERT_OK(ScheduleTask(3, queue.get()));
279 std::unique_ptr<BatchScheduler<FakeTask>> queue;
280 TF_ASSERT_OK(scheduler->AddQueue(queue_options, callback, &queue));
284 TF_ASSERT_OK(ScheduleTask(1, queue.get()));
288 TF_ASSERT_OK(ScheduleTask(2, queue.get()));
325 std::unique_ptr<BatchScheduler<FakeTask>> queue;
326 TF_ASSERT_OK(scheduler->AddQueue(queue_options, callback, &queue));
328 TF_ASSERT_OK(ScheduleTask(1, queue.get()));
330 TF_ASSERT_OK(ScheduleTask(2, queue.get()));
383 // Enqueue a batch-filling task to queue 0, and wait for it to get
389 // Enqueue two more batch-filling tasks to queue 0.
393 // Enqueue one task to queue 1, and then advance the clock so it becomes
394 // eligible for scheduling due to the timeout. Ensure that the queue 1 batch
395 // gets scheduled before the next queue 0 one.
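
The comments at 383-395 describe a two-queue timeout scenario. A sketch of that shape, assuming two queues added to one scheduler and an advanceable clock such as the FakeClockEnv these tests use elsewhere; the callback and variable names are illustrative:

std::unique_ptr<BatchScheduler<FakeTask>> queue_0, queue_1;
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_0_callback, &queue_0));
TF_ASSERT_OK(scheduler->AddQueue(queue_options, queue_1_callback, &queue_1));

TF_ASSERT_OK(ScheduleTask(10, queue_0.get()));  // batch-filling task; gets scheduled first
TF_ASSERT_OK(ScheduleTask(10, queue_0.get()));  // two more batch-filling tasks
TF_ASSERT_OK(ScheduleTask(10, queue_0.get()));  //   queued up behind it
TF_ASSERT_OK(ScheduleTask(1, queue_1.get()));   // small task; its batch is not yet full
env.AdvanceByMicroseconds(queue_options.batch_timeout_micros + 1);
// The timed-out queue-1 batch is now eligible and should be processed before
// the remaining queue-0 batches.
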
429 std::unique_ptr<BatchScheduler<FakeTask>> queue;
430 TF_ASSERT_OK(scheduler->AddQueue(queue_options, callback, &queue));
431 EXPECT_EQ(2, queue->max_task_size());
432 EXPECT_EQ(0, queue->NumEnqueuedTasks());
433 EXPECT_EQ(max_enqueued_batches * 2, queue->SchedulingCapacity());
436 // we're done testing the maximum queue length.
437 TF_ASSERT_OK(ScheduleTask(2, queue.get()));
439 EXPECT_EQ(0, queue->NumEnqueuedTasks());
444 EXPECT_EQ(i * 2, queue->NumEnqueuedTasks());
445 EXPECT_EQ((max_enqueued_batches - i) * 2, queue->SchedulingCapacity());
446 TF_ASSERT_OK(ScheduleTask(1, queue.get()));
447 EXPECT_EQ((i * 2) + 1, queue->NumEnqueuedTasks());
449 queue->SchedulingCapacity());
450 TF_ASSERT_OK(ScheduleTask(1, queue.get()));
452 EXPECT_EQ(max_enqueued_batches * 2, queue->NumEnqueuedTasks());
453 EXPECT_EQ(0, queue->SchedulingCapacity());
456 Status status = ScheduleTask(1, queue.get());
459 EXPECT_EQ(max_enqueued_batches * 2, queue->NumEnqueuedTasks());
460 EXPECT_EQ(0, queue->SchedulingCapacity());
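
The assertions at 429-460 walk NumEnqueuedTasks() and SchedulingCapacity() through a queue as its batches fill, assuming max_batch_size = 2 (consistent with the max_task_size() expectation at 431) and max_enqueued_batches batch slots: an empty queue advertises max_enqueued_batches * 2 units of capacity, each full batch consumes 2, and a half-full open batch consumes 1. A sketch of one loop iteration, not quoted from the test:

// Before iteration i, the queue holds i full batches of two size-1 tasks each.
EXPECT_EQ(i * 2, queue->NumEnqueuedTasks());
EXPECT_EQ((max_enqueued_batches - i) * 2, queue->SchedulingCapacity());

TF_ASSERT_OK(ScheduleTask(1, queue.get()));  // opens batch i with one slot used
EXPECT_EQ((i * 2) + 1, queue->NumEnqueuedTasks());
// One free slot remains in the open batch, plus (max_enqueued_batches - i - 1)
// untouched batch slots of size 2:
EXPECT_EQ(((max_enqueued_batches - i) * 2) - 1, queue->SchedulingCapacity());

TF_ASSERT_OK(ScheduleTask(1, queue.get()));  // fills batch i
// Once every batch slot is full, capacity reaches 0, a further ScheduleTask()
// no longer fits, and the counts stay unchanged, as the lines at 456-460 verify.
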
505 // Clog up queue 0.
514 // Ensure that queue 1 still behaves normally, and lets us process tasks.
522 // Let poor queue 0 drain.
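
The comments at 505-522 describe clogging queue 0 while queue 1 keeps getting serviced. A sketch of that isolation property, assuming at least two batch threads and a callback that parks queue 0's batch on a Notification; the names here are illustrative rather than the test's literal fixture:

Notification proceed;
auto queue_0_callback = [&proceed](std::unique_ptr<Batch<FakeTask>> batch) {
  proceed.WaitForNotification();  // holds one batch thread, "clogging" queue 0
};
auto queue_1_callback = [](std::unique_ptr<Batch<FakeTask>> batch) {
  // Processes immediately.
};

TF_ASSERT_OK(ScheduleTask(10, queue_0.get()));  // queue 0's batch blocks in its callback
TF_ASSERT_OK(ScheduleTask(10, queue_1.get()));  // queue 1 still accepts and processes work
proceed.Notify();                               // let poor queue 0 drain
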
556 std::unique_ptr<BatchScheduler<FakeTask>> queue;
557 TF_ASSERT_OK(scheduler->AddQueue(queue_options, callback, &queue));
559 // Clog up the queue.
561 TF_ASSERT_OK(ScheduleTask(10, queue.get()));
566 TF_ASSERT_OK(ScheduleTask(10, queue.get()));
570 EXPECT_EQ(error::UNAVAILABLE, ScheduleTask(10, queue.get()).code());
572 // Destroy the queue. The destructor should block until all tasks have been
577 [&queue, &destroy_queue_thread_started, &queue_destroyed] {
579 queue = nullptr;
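
The matches at 556-579 exercise the queue's destruction contract: dropping the last reference runs the queue's destructor, which is expected to block until every enqueued task has been handed to the processing callback. A sketch of that shape; the Notification names mirror the variables visible above, while the Thread/Env plumbing is illustrative:

Notification destroy_queue_thread_started, queue_destroyed;
std::unique_ptr<Thread> destroy_queue_thread(Env::Default()->StartThread(
    {}, "DestroyQueue",
    [&queue, &destroy_queue_thread_started, &queue_destroyed] {
      destroy_queue_thread_started.Notify();
      queue = nullptr;  // the destructor blocks here until pending batches finish
      queue_destroyed.Notify();
    }));
destroy_queue_thread_started.WaitForNotification();
EXPECT_FALSE(queue_destroyed.HasBeenNotified());  // still draining the clogged batches
// ... unblock the processing callback so the backlog can drain ...
queue_destroyed.WaitForNotification();  // the destructor has returned
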