#include <base/logging.h>
#include <binder/Parcel.h>
#include <dvr/dvr_api.h>
#include <private/dvr/buffer_hub_queue_client.h>
#include <private/dvr/consumer_buffer.h>
#include <private/dvr/producer_buffer.h>

#include <gtest/gtest.h>
#include <poll.h>
#include <sys/eventfd.h>
#include <unistd.h>

#include <algorithm>
#include <array>
#include <deque>
#include <memory>
#include <vector>

// Enable/disable debug logging.
#define TRACE 0

namespace android {
namespace dvr {

using pdx::LocalChannelHandle;
using pdx::LocalHandle;

namespace {

constexpr uint32_t kBufferWidth = 100;
constexpr uint32_t kBufferHeight = 1;
constexpr uint32_t kBufferLayerCount = 1;
constexpr uint32_t kBufferFormat = HAL_PIXEL_FORMAT_BLOB;
constexpr uint64_t kBufferUsage = GRALLOC_USAGE_SW_READ_RARELY;
constexpr int kTimeoutMs = 100;
constexpr int kNoTimeout = 0;

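// Test fixture that creates a ProducerQueue and, from it, a ConsumerQueue.
// Helpers allocate buffers using the constants above and pump queue events by
// polling the queue's event fd.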
class BufferHubQueueTest : public ::testing::Test {
 public:
  bool CreateProducerQueue(const ProducerQueueConfig& config,
                           const UsagePolicy& usage) {
    producer_queue_ = ProducerQueue::Create(config, usage);
    return producer_queue_ != nullptr;
  }

  bool CreateConsumerQueue() {
    if (producer_queue_) {
      consumer_queue_ = producer_queue_->CreateConsumerQueue();
      return consumer_queue_ != nullptr;
    } else {
      return false;
    }
  }

  bool CreateQueues(const ProducerQueueConfig& config,
                    const UsagePolicy& usage) {
    return CreateProducerQueue(config, usage) && CreateConsumerQueue();
  }

  void AllocateBuffer(size_t* slot_out = nullptr) {
    // Create producer buffer.
    auto status = producer_queue_->AllocateBuffer(kBufferWidth, kBufferHeight,
                                                  kBufferLayerCount,
                                                  kBufferFormat, kBufferUsage);

    ASSERT_TRUE(status.ok());
    size_t slot = status.take();
    if (slot_out)
      *slot_out = slot;
  }

  bool WaitAndHandleOnce(BufferHubQueue* queue, int timeout_ms) {
    pollfd pfd{queue->queue_fd(), POLLIN, 0};
    int ret;
    do {
      ret = poll(&pfd, 1, timeout_ms);
    } while (ret == -1 && errno == EINTR);

    if (ret < 0) {
      ALOGW("Failed to poll queue %d's event fd, error: %s.", queue->id(),
            strerror(errno));
      return false;
    } else if (ret == 0) {
      return false;
    }
    return queue->HandleQueueEvents();
  }

 protected:
  ProducerQueueConfigBuilder config_builder_;
  std::unique_ptr<ProducerQueue> producer_queue_;
  std::unique_ptr<ConsumerQueue> consumer_queue_;
};

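// Dequeues a single buffer repeatedly, cycling it through the full
// gain -> post -> acquire -> release lifecycle on each iteration.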
TEST_F(BufferHubQueueTest, TestDequeue) {
  const int64_t nb_dequeue_times = 16;

  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));

  // Allocate only one buffer.
  AllocateBuffer();

  // But dequeue multiple times.
  for (int64_t i = 0; i < nb_dequeue_times; i++) {
    size_t slot;
    LocalHandle fence;
    DvrNativeBufferMetadata mi, mo;

    // Producer gains a buffer.
    auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    EXPECT_TRUE(p1_status.ok());
    auto p1 = p1_status.take();
    ASSERT_NE(p1, nullptr);

    // Producer posts the buffer.
    mi.index = i;
    EXPECT_EQ(p1->PostAsync(&mi, LocalHandle()), 0);

    // Consumer acquires a buffer.
    auto c1_status = consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    EXPECT_TRUE(c1_status.ok()) << c1_status.GetErrorMessage();
    auto c1 = c1_status.take();
    ASSERT_NE(c1, nullptr);
    EXPECT_EQ(mi.index, i);
    EXPECT_EQ(mo.index, i);

    // Consumer releases the buffer.
    EXPECT_EQ(c1->ReleaseAsync(&mi, LocalHandle()), 0);
  }
}

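// Passing true as the last argument to ProducerQueue::Dequeue() lets the
// producer re-gain an already-posted buffer. With a consumer attached, the
// producer should prefer released or unused buffers and only fall back to the
// oldest posted buffer.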
TEST_F(BufferHubQueueTest,
       TestDequeuePostedBufferIfNoAvailableReleasedBuffer_withConsumerBuffer) {
  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));

  // Allocate 3 buffers to use.
  const size_t test_queue_capacity = 3;
  for (int64_t i = 0; i < test_queue_capacity; i++) {
    AllocateBuffer();
  }
  EXPECT_EQ(producer_queue_->capacity(), test_queue_capacity);

  size_t producer_slot, consumer_slot;
  LocalHandle fence;
  DvrNativeBufferMetadata mi, mo;

  // Producer posts 2 buffers and remembers their posted sequence.
  std::deque<size_t> posted_slots;
  for (int64_t i = 0; i < 2; i++) {
    auto p1_status =
        producer_queue_->Dequeue(kTimeoutMs, &producer_slot, &mo, &fence, true);
    EXPECT_TRUE(p1_status.ok());
    auto p1 = p1_status.take();
    ASSERT_NE(p1, nullptr);

    // The producer should not gain a posted buffer while there are still
    // available buffers to gain.
    auto found_iter =
        std::find(posted_slots.begin(), posted_slots.end(), producer_slot);
    EXPECT_EQ(found_iter, posted_slots.end());
    posted_slots.push_back(producer_slot);

    // Producer posts the buffer.
    mi.index = i;
    EXPECT_EQ(0, p1->PostAsync(&mi, LocalHandle()));
  }

  // Consumer acquires one buffer.
  auto c1_status =
      consumer_queue_->Dequeue(kTimeoutMs, &consumer_slot, &mo, &fence);
  EXPECT_TRUE(c1_status.ok());
  auto c1 = c1_status.take();
  ASSERT_NE(c1, nullptr);
  // The consumer should get the oldest posted buffer; posted_slots[0] should
  // be in the acquired state now.
  EXPECT_EQ(mo.index, 0);
  // Consumer releases the buffer.
  EXPECT_EQ(c1->ReleaseAsync(&mi, LocalHandle()), 0);
  // posted_slots[0] should be in the released state now.

  // Producer gains and posts 2 buffers.
  for (int64_t i = 0; i < 2; i++) {
    auto p1_status =
        producer_queue_->Dequeue(kTimeoutMs, &producer_slot, &mo, &fence, true);
    EXPECT_TRUE(p1_status.ok());
    auto p1 = p1_status.take();
    ASSERT_NE(p1, nullptr);

    // The gained buffer should be the one in the released state or the one
    // that hasn't been used yet.
    EXPECT_NE(posted_slots[1], producer_slot);

    mi.index = i + 2;
    EXPECT_EQ(0, p1->PostAsync(&mi, LocalHandle()));
  }

  // Producer gains a buffer.
  auto p1_status =
      producer_queue_->Dequeue(kTimeoutMs, &producer_slot, &mo, &fence, true);
  EXPECT_TRUE(p1_status.ok());
  auto p1 = p1_status.take();
  ASSERT_NE(p1, nullptr);

  // The gained buffer should be the oldest posted buffer.
  EXPECT_EQ(posted_slots[1], producer_slot);

  // Producer posts the buffer.
  mi.index = 4;
  EXPECT_EQ(0, p1->PostAsync(&mi, LocalHandle()));
}

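// Same scenario without any consumer: once every buffer is posted, repeated
// dequeues should re-gain the posted buffers in their original posting order.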
TEST_F(BufferHubQueueTest,
       TestDequeuePostedBufferIfNoAvailableReleasedBuffer_noConsumerBuffer) {
  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));

  // Allocate 4 buffers to use.
  const size_t test_queue_capacity = 4;
  for (int64_t i = 0; i < test_queue_capacity; i++) {
    AllocateBuffer();
  }
  EXPECT_EQ(producer_queue_->capacity(), test_queue_capacity);

  // Post all allowed buffers and remember their posted sequence.
  std::deque<size_t> posted_slots;
  for (int64_t i = 0; i < test_queue_capacity; i++) {
    size_t slot;
    LocalHandle fence;
    DvrNativeBufferMetadata mi, mo;

    // Producer gains a buffer.
    auto p1_status =
        producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence, true);
    EXPECT_TRUE(p1_status.ok());
    auto p1 = p1_status.take();
    ASSERT_NE(p1, nullptr);

    // The producer should not gain a posted buffer while there are still
    // available buffers to gain.
    auto found_iter = std::find(posted_slots.begin(), posted_slots.end(), slot);
    EXPECT_EQ(found_iter, posted_slots.end());
    posted_slots.push_back(slot);

    // Producer posts the buffer.
    mi.index = i;
    EXPECT_EQ(p1->PostAsync(&mi, LocalHandle()), 0);
  }

  // Gain posted buffers in sequence.
  const int64_t nb_dequeue_all_times = 2;
  for (int j = 0; j < nb_dequeue_all_times; ++j) {
    for (int i = 0; i < test_queue_capacity; ++i) {
      size_t slot;
      LocalHandle fence;
      DvrNativeBufferMetadata mi, mo;

      // Producer gains a buffer.
      auto p1_status =
          producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence, true);
      EXPECT_TRUE(p1_status.ok());
      auto p1 = p1_status.take();
      ASSERT_NE(p1, nullptr);

      // The gained buffer should be the oldest posted buffer.
      EXPECT_EQ(posted_slots[i], slot);

      // Producer posts the buffer.
      mi.index = i + test_queue_capacity * (j + 1);
      EXPECT_EQ(p1->PostAsync(&mi, LocalHandle()), 0);
    }
  }
}

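// Verifies producer/consumer bookkeeping: the consumer queue imports buffers
// lazily on Dequeue(), and a posted buffer (with its fence) becomes available
// to the consumer only after the producer posts it.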
TEST_F(BufferHubQueueTest, TestProducerConsumer) {
  const size_t kBufferCount = 16;
  size_t slot;
  DvrNativeBufferMetadata mi, mo;
  LocalHandle fence;

  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));

  for (size_t i = 0; i < kBufferCount; i++) {
    AllocateBuffer();

    // The producer queue has all the available buffers on initialization.
    ASSERT_EQ(producer_queue_->count(), i + 1);
    ASSERT_EQ(producer_queue_->capacity(), i + 1);

    // The consumer queue has no available buffers on initialization.
    ASSERT_EQ(consumer_queue_->count(), 0U);
    // The consumer queue does not import buffers until a dequeue is issued.
    ASSERT_EQ(consumer_queue_->capacity(), i);
    // Dequeue times out since no buffer is ready for the consumer, but it
    // implicitly triggers buffer import and bumps up |capacity|.
    auto status = consumer_queue_->Dequeue(kNoTimeout, &slot, &mo, &fence);
    ASSERT_FALSE(status.ok());
    ASSERT_EQ(ETIMEDOUT, status.error());
    ASSERT_EQ(consumer_queue_->capacity(), i + 1);
  }

  // Use an eventfd as a stand-in for a fence.
  LocalHandle post_fence(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK));

  for (size_t i = 0; i < kBufferCount; i++) {
    // The first time there is no buffer available to dequeue.
    auto consumer_status =
        consumer_queue_->Dequeue(kNoTimeout, &slot, &mo, &fence);
    ASSERT_FALSE(consumer_status.ok());
    ASSERT_EQ(consumer_status.error(), ETIMEDOUT);

    // Make sure the producer buffer is POSTED so that it is ready to acquire
    // in the consumer's Dequeue() call.
    auto producer_status =
        producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    ASSERT_TRUE(producer_status.ok());
    auto producer = producer_status.take();
    ASSERT_NE(nullptr, producer);

    mi.index = static_cast<int64_t>(i);
    ASSERT_EQ(producer->PostAsync(&mi, post_fence), 0);

    // The second time, the just-POSTED buffer should be dequeued.
    consumer_status = consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    ASSERT_TRUE(consumer_status.ok());
    EXPECT_TRUE(fence.IsValid());

    auto consumer = consumer_status.take();
    ASSERT_NE(nullptr, consumer);
    ASSERT_EQ(mi.index, mo.index);
  }
}

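// InsertBuffer() accepts a standalone ProducerBuffer only in the gained state:
// inserting a posted buffer fails with EINVAL, and the consumer queue imports
// an inserted buffer after handling the resulting queue event.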
TEST_F(BufferHubQueueTest, TestInsertBuffer) {
  ASSERT_TRUE(CreateProducerQueue(config_builder_.Build(), UsagePolicy{}));

  consumer_queue_ = producer_queue_->CreateConsumerQueue();
  ASSERT_TRUE(consumer_queue_ != nullptr);
  EXPECT_EQ(producer_queue_->capacity(), 0);
  EXPECT_EQ(consumer_queue_->capacity(), 0);

  std::shared_ptr<ProducerBuffer> p1 = ProducerBuffer::Create(
      kBufferWidth, kBufferHeight, kBufferFormat, kBufferUsage, 0);
  ASSERT_TRUE(p1 != nullptr);
  ASSERT_EQ(p1->GainAsync(), 0);

  // Inserting a posted buffer will fail.
  DvrNativeBufferMetadata meta;
  EXPECT_EQ(p1->PostAsync(&meta, LocalHandle()), 0);
  auto status_or_slot = producer_queue_->InsertBuffer(p1);
  EXPECT_FALSE(status_or_slot.ok());
  EXPECT_EQ(status_or_slot.error(), EINVAL);

  // Inserting a gained buffer will succeed.
  std::shared_ptr<ProducerBuffer> p2 = ProducerBuffer::Create(
      kBufferWidth, kBufferHeight, kBufferFormat, kBufferUsage);
  ASSERT_TRUE(p2 != nullptr);
  ASSERT_EQ(p2->GainAsync(), 0);
  status_or_slot = producer_queue_->InsertBuffer(p2);
  EXPECT_TRUE(status_or_slot.ok()) << status_or_slot.GetErrorMessage();
  // This is the first buffer inserted; it should take slot 0.
  size_t slot = status_or_slot.get();
  EXPECT_EQ(slot, 0);

  // Wait for and expect the consumer to pick up the newly inserted buffer.
  WaitAndHandleOnce(consumer_queue_.get(), kTimeoutMs);
  EXPECT_EQ(consumer_queue_->capacity(), 1ULL);
}

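// RemoveBuffer() shrinks the producer queue immediately, while the consumer
// queue only notices once the last reference to the removed buffer is dropped;
// a subsequent allocation should reuse the freed slot.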
TEST_F(BufferHubQueueTest, TestRemoveBuffer) {
  ASSERT_TRUE(CreateProducerQueue(config_builder_.Build(), UsagePolicy{}));
  DvrNativeBufferMetadata mo;

  // Allocate buffers.
  const size_t kBufferCount = 4u;
  for (size_t i = 0; i < kBufferCount; i++) {
    AllocateBuffer();
  }
  ASSERT_EQ(kBufferCount, producer_queue_->count());
  ASSERT_EQ(kBufferCount, producer_queue_->capacity());

  consumer_queue_ = producer_queue_->CreateConsumerQueue();
  ASSERT_NE(nullptr, consumer_queue_);

  // Check that buffers are correctly imported on construction.
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  EXPECT_EQ(0u, consumer_queue_->count());

  // Dequeue all the buffers and keep track of them in an array. This prevents
  // the producer queue ring buffer ref counts from interfering with the tests.
  struct Entry {
    std::shared_ptr<ProducerBuffer> buffer;
    LocalHandle fence;
    size_t slot;
  };
  std::array<Entry, kBufferCount> buffers;

  for (size_t i = 0; i < kBufferCount; i++) {
    Entry* entry = &buffers[i];
    auto producer_status =
        producer_queue_->Dequeue(kTimeoutMs, &entry->slot, &mo, &entry->fence);
    ASSERT_TRUE(producer_status.ok());
    entry->buffer = producer_status.take();
    ASSERT_NE(nullptr, entry->buffer);
  }

  // Remove a buffer and make sure both queues reflect the change.
  ASSERT_TRUE(producer_queue_->RemoveBuffer(buffers[0].slot));
  EXPECT_EQ(kBufferCount - 1, producer_queue_->capacity());

  // As long as the removed buffer is still alive, the consumer queue won't
  // know it's gone.
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  EXPECT_FALSE(consumer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());

  // Release the removed buffer.
  buffers[0].buffer = nullptr;

  // Now the consumer queue should know it's gone.
  EXPECT_FALSE(WaitAndHandleOnce(consumer_queue_.get(), kTimeoutMs));
  ASSERT_EQ(kBufferCount - 1, consumer_queue_->capacity());

  // Allocate a new buffer. This should take the first empty slot.
  size_t slot;
  AllocateBuffer(&slot);
  ALOGE_IF(TRACE, "ALLOCATE %zu", slot);
  EXPECT_EQ(buffers[0].slot, slot);
  EXPECT_EQ(kBufferCount, producer_queue_->capacity());

  // The consumer queue should pick up the new buffer.
  EXPECT_EQ(kBufferCount - 1, consumer_queue_->capacity());
  EXPECT_FALSE(consumer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());

  // Remove and allocate a buffer.
  ASSERT_TRUE(producer_queue_->RemoveBuffer(buffers[1].slot));
  EXPECT_EQ(kBufferCount - 1, producer_queue_->capacity());
  buffers[1].buffer = nullptr;

  AllocateBuffer(&slot);
  ALOGE_IF(TRACE, "ALLOCATE %zu", slot);
  EXPECT_EQ(buffers[1].slot, slot);
  EXPECT_EQ(kBufferCount, producer_queue_->capacity());

  // The consumer queue should pick up the new buffer, but the overall capacity
  // shouldn't change.
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  EXPECT_FALSE(consumer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());

  // Remove and allocate a buffer, but don't free the buffer right away.
  ASSERT_TRUE(producer_queue_->RemoveBuffer(buffers[2].slot));
  EXPECT_EQ(kBufferCount - 1, producer_queue_->capacity());

  AllocateBuffer(&slot);
  ALOGE_IF(TRACE, "ALLOCATE %zu", slot);
  EXPECT_EQ(buffers[2].slot, slot);
  EXPECT_EQ(kBufferCount, producer_queue_->capacity());

  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  EXPECT_FALSE(consumer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());

  // Release the producer buffer to trigger a POLLHUP event for an already
  // removed buffer.
  buffers[2].buffer = nullptr;
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  EXPECT_FALSE(consumer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
}

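// Exercises multiple consumers on one producer queue: a silent consumer queue
// does not import buffers or make them available, while a regular consumer
// queue created from it imports the full capacity and can acquire and discard
// posted buffers.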
TEST_F(BufferHubQueueTest, TestMultipleConsumers) {
  // ProducerQueueConfigBuilder doesn't set Metadata{size}, which means there
  // is no metadata associated with this BufferQueue's buffers.
  ASSERT_TRUE(CreateProducerQueue(config_builder_.Build(), UsagePolicy{}));

  // Allocate buffers.
  const size_t kBufferCount = 4u;
  for (size_t i = 0; i < kBufferCount; i++) {
    AllocateBuffer();
  }
  ASSERT_EQ(kBufferCount, producer_queue_->count());

  // Build a silent consumer queue to test multi-consumer queue features.
  auto silent_queue = producer_queue_->CreateSilentConsumerQueue();
  ASSERT_NE(nullptr, silent_queue);

  // Check that the silent queue doesn't import buffers on creation.
  EXPECT_EQ(silent_queue->capacity(), 0U);

  // Dequeue and post a buffer.
  size_t slot;
  LocalHandle fence;
  DvrNativeBufferMetadata mi, mo;
  auto producer_status =
      producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  EXPECT_TRUE(producer_status.ok());
  auto producer_buffer = producer_status.take();
  ASSERT_NE(producer_buffer, nullptr);
  EXPECT_EQ(producer_buffer->PostAsync(&mi, {}), 0);
  // After the post, check the number of remaining available buffers.
  EXPECT_EQ(producer_queue_->count(), kBufferCount - 1);

  // Currently we expect no buffer to be available prior to calling
  // WaitForBuffers/HandleQueueEvents.
  // TODO(eieio): Note this behavior may change in the future.
  EXPECT_EQ(silent_queue->count(), 0U);
  EXPECT_FALSE(silent_queue->HandleQueueEvents());
  EXPECT_EQ(silent_queue->count(), 0U);

  // Build a new consumer queue to test multi-consumer queue features.
  consumer_queue_ = silent_queue->CreateConsumerQueue();
  ASSERT_NE(consumer_queue_, nullptr);

  // Check that buffers are correctly imported on construction.
  EXPECT_EQ(consumer_queue_->capacity(), kBufferCount);
  // Buffers are only imported; their availability is not checked until the
  // first call to Dequeue().
  EXPECT_EQ(consumer_queue_->count(), 0U);

  // Reclaim released/ignored buffers.
  EXPECT_EQ(producer_queue_->count(), kBufferCount - 1);

  usleep(10000);
  WaitAndHandleOnce(producer_queue_.get(), kTimeoutMs);
  EXPECT_EQ(producer_queue_->count(), kBufferCount - 1);

  // Post another buffer.
  producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  EXPECT_TRUE(producer_status.ok());
  producer_buffer = producer_status.take();
  ASSERT_NE(producer_buffer, nullptr);
  EXPECT_EQ(producer_buffer->PostAsync(&mi, {}), 0);

  // Verify that the consumer queue receives it.
  size_t consumer_queue_count = consumer_queue_->count();
  WaitAndHandleOnce(consumer_queue_.get(), kTimeoutMs);
  EXPECT_GT(consumer_queue_->count(), consumer_queue_count);

  // Save the current consumer queue buffer count to compare after the dequeue.
  consumer_queue_count = consumer_queue_->count();

  // Dequeue and acquire/release (discard) buffers on the consumer end.
  auto consumer_status =
      consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  EXPECT_TRUE(consumer_status.ok());
  auto consumer_buffer = consumer_status.take();
  ASSERT_NE(consumer_buffer, nullptr);
  consumer_buffer->Discard();

  // The buffer should be returned to the producer queue without being handled
  // by the silent consumer queue.
  EXPECT_LT(consumer_queue_->count(), consumer_queue_count);
  EXPECT_EQ(producer_queue_->count(), kBufferCount - 2);

  WaitAndHandleOnce(producer_queue_.get(), kTimeoutMs);
  EXPECT_EQ(producer_queue_->count(), kBufferCount - 1);
}

struct TestUserMetadata {
  char a;
  int32_t b;
  int64_t c;
};

constexpr uint64_t kUserMetadataSize =
    static_cast<uint64_t>(sizeof(TestUserMetadata));

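// User metadata round-trip: the producer passes a TestUserMetadata blob via
// DvrNativeBufferMetadata, and the consumer should read back identical values.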
TEST_F(BufferHubQueueTest, TestUserMetadata) {
  ASSERT_TRUE(CreateQueues(
      config_builder_.SetMetadata<TestUserMetadata>().Build(), UsagePolicy{}));

  AllocateBuffer();

  std::vector<TestUserMetadata> user_metadata_list = {
      {'0', 0, 0}, {'1', 10, 3333}, {'@', 123, 1000000000}};

  for (auto user_metadata : user_metadata_list) {
    size_t slot;
    LocalHandle fence;
    DvrNativeBufferMetadata mi, mo;

    auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    EXPECT_TRUE(p1_status.ok());
    auto p1 = p1_status.take();
    ASSERT_NE(p1, nullptr);

    // TODO(b/69469185): Test against metadata from the consumer once we
    // implement release metadata properly.
    // EXPECT_EQ(mo.user_metadata_ptr, 0U);
    // EXPECT_EQ(mo.user_metadata_size, 0U);

    mi.user_metadata_size = kUserMetadataSize;
    mi.user_metadata_ptr = reinterpret_cast<uint64_t>(&user_metadata);
    EXPECT_EQ(p1->PostAsync(&mi, {}), 0);
    auto c1_status = consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    EXPECT_TRUE(c1_status.ok()) << c1_status.GetErrorMessage();
    auto c1 = c1_status.take();
    ASSERT_NE(c1, nullptr);

    EXPECT_EQ(mo.user_metadata_size, kUserMetadataSize);
    auto out_user_metadata =
        reinterpret_cast<TestUserMetadata*>(mo.user_metadata_ptr);
    EXPECT_EQ(user_metadata.a, out_user_metadata->a);
    EXPECT_EQ(user_metadata.b, out_user_metadata->b);
    EXPECT_EQ(user_metadata.c, out_user_metadata->c);

    // On release, empty metadata is also legitimate.
    mi.user_metadata_size = 0U;
    mi.user_metadata_ptr = 0U;
    c1->ReleaseAsync(&mi, {});
  }
}

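// Posting with a user metadata size that does not match the configured
// TestUserMetadata fails with -E2BIG; posting with the exact size then
// succeeds.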
TEST_F(BufferHubQueueTest, TestUserMetadataMismatch) {
  ASSERT_TRUE(CreateQueues(
      config_builder_.SetMetadata<TestUserMetadata>().Build(), UsagePolicy{}));

  AllocateBuffer();

  TestUserMetadata user_metadata;
  size_t slot;
  LocalHandle fence;
  DvrNativeBufferMetadata mi, mo;
  auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  EXPECT_TRUE(p1_status.ok());
  auto p1 = p1_status.take();
  ASSERT_NE(p1, nullptr);

  // Posting with a mismatched user metadata size will fail, but the producer
  // buffer itself should stay untouched.
  mi.user_metadata_ptr = reinterpret_cast<uint64_t>(&user_metadata);
  mi.user_metadata_size = kUserMetadataSize + 1;
  EXPECT_EQ(p1->PostAsync(&mi, {}), -E2BIG);
  // Posting with the exact same user metadata size should succeed.
  mi.user_metadata_ptr = reinterpret_cast<uint64_t>(&user_metadata);
  mi.user_metadata_size = kUserMetadataSize;
  EXPECT_EQ(p1->PostAsync(&mi, {}), 0);
}

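// Enqueue() returns a gained buffer to the producer queue without posting it,
// so the consumer's subsequent Dequeue() should fail.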
TEST_F(BufferHubQueueTest, TestEnqueue) {
  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<int64_t>().Build(),
                           UsagePolicy{}));
  AllocateBuffer();

  size_t slot;
  LocalHandle fence;
  DvrNativeBufferMetadata mo;
  auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  ASSERT_TRUE(p1_status.ok());
  auto p1 = p1_status.take();
  ASSERT_NE(nullptr, p1);

  producer_queue_->Enqueue(p1, slot, 0ULL);
  auto c1_status = consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  ASSERT_FALSE(c1_status.ok());
}

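// Buffers can be allocated on the fly after the queues are created; the
// consumer imports them lazily and receives posted buffers in their original
// slots.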
TEST_F(BufferHubQueueTest, TestAllocateBuffer) {
  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));

  size_t ps1;
  AllocateBuffer();
  LocalHandle fence;
  DvrNativeBufferMetadata mi, mo;
  auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &ps1, &mo, &fence);
  ASSERT_TRUE(p1_status.ok());
  auto p1 = p1_status.take();
  ASSERT_NE(p1, nullptr);

  // The producer queue is exhausted.
  size_t ps2;
  auto p2_status = producer_queue_->Dequeue(kTimeoutMs, &ps2, &mo, &fence);
  ASSERT_FALSE(p2_status.ok());
  ASSERT_EQ(ETIMEDOUT, p2_status.error());

  // Dynamically add a buffer.
  AllocateBuffer();
  ASSERT_EQ(producer_queue_->count(), 1U);
  ASSERT_EQ(producer_queue_->capacity(), 2U);

  // Now we can dequeue again.
  p2_status = producer_queue_->Dequeue(kTimeoutMs, &ps2, &mo, &fence);
  ASSERT_TRUE(p2_status.ok());
  auto p2 = p2_status.take();
  ASSERT_NE(p2, nullptr);
  ASSERT_EQ(producer_queue_->count(), 0U);
  // p1 and p2 should have different slot numbers.
  ASSERT_NE(ps1, ps2);

  // The consumer queue does not import buffers until |Dequeue| or
  // |ImportBuffers| is called. So far consumer_queue_ should be empty.
  ASSERT_EQ(consumer_queue_->count(), 0U);

  int64_t seq = 1;
  mi.index = seq;
  ASSERT_EQ(p1->PostAsync(&mi, {}), 0);

  size_t cs1, cs2;
  auto c1_status = consumer_queue_->Dequeue(kTimeoutMs, &cs1, &mo, &fence);
  ASSERT_TRUE(c1_status.ok()) << c1_status.GetErrorMessage();
  auto c1 = c1_status.take();
  ASSERT_NE(c1, nullptr);
  ASSERT_EQ(consumer_queue_->count(), 0U);
  ASSERT_EQ(consumer_queue_->capacity(), 2U);
  ASSERT_EQ(cs1, ps1);

  ASSERT_EQ(p2->PostAsync(&mi, {}), 0);
  auto c2_status = consumer_queue_->Dequeue(kTimeoutMs, &cs2, &mo, &fence);
  ASSERT_TRUE(c2_status.ok());
  auto c2 = c2_status.take();
  ASSERT_NE(c2, nullptr);
  ASSERT_EQ(cs2, ps2);
}

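// AllocateBuffers() allocates the requested number of buffers in a single call
// and returns their slots; requesting zero buffers is a valid no-op.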
TEST_F(BufferHubQueueTest, TestAllocateTwoBuffers) {
  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));
  ASSERT_EQ(producer_queue_->capacity(), 0);
  auto status = producer_queue_->AllocateBuffers(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage, /*buffer_count=*/2);
  ASSERT_TRUE(status.ok());
  std::vector<size_t> buffer_slots = status.take();
  ASSERT_EQ(buffer_slots.size(), 2);
  ASSERT_EQ(producer_queue_->capacity(), 2);
}

TEST_F(BufferHubQueueTest, TestAllocateZeroBuffers) {
  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));
  ASSERT_EQ(producer_queue_->capacity(), 0);
  auto status = producer_queue_->AllocateBuffers(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage, /*buffer_count=*/0);
  ASSERT_TRUE(status.ok());
  std::vector<size_t> buffer_slots = status.take();
  ASSERT_EQ(buffer_slots.size(), 0);
  ASSERT_EQ(producer_queue_->capacity(), 0);
}

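// The next four tests cover UsagePolicy: set/clear masks silently adjust the
// usage bits of allocated buffers, while deny-set/deny-clear masks cause
// allocation to fail with EINVAL when violated.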
TEST_F(BufferHubQueueTest, TestUsageSetMask) {
  const uint32_t set_mask = GRALLOC_USAGE_SW_WRITE_OFTEN;
  ASSERT_TRUE(
      CreateQueues(config_builder_.Build(), UsagePolicy{set_mask, 0, 0, 0}));

  // During allocation, deliberately leave |set_mask| out of the usage bits.
  auto status = producer_queue_->AllocateBuffer(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage & ~set_mask);
  ASSERT_TRUE(status.ok());

  LocalHandle fence;
  size_t slot;
  DvrNativeBufferMetadata mo;
  auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  ASSERT_TRUE(p1_status.ok());
  auto p1 = p1_status.take();
  ASSERT_EQ(p1->usage() & set_mask, set_mask);
}

TEST_F(BufferHubQueueTest, TestUsageClearMask) {
  const uint32_t clear_mask = GRALLOC_USAGE_SW_WRITE_OFTEN;
  ASSERT_TRUE(
      CreateQueues(config_builder_.Build(), UsagePolicy{0, clear_mask, 0, 0}));

  // During allocation, deliberately add |clear_mask| to the usage bits.
  auto status = producer_queue_->AllocateBuffer(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage | clear_mask);
  ASSERT_TRUE(status.ok());

  LocalHandle fence;
  size_t slot;
  DvrNativeBufferMetadata mo;
  auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  ASSERT_TRUE(p1_status.ok());
  auto p1 = p1_status.take();
  ASSERT_EQ(p1->usage() & clear_mask, 0U);
}

TEST_F(BufferHubQueueTest, TestUsageDenySetMask) {
  const uint32_t deny_set_mask = GRALLOC_USAGE_SW_WRITE_OFTEN;
  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<int64_t>().Build(),
                           UsagePolicy{0, 0, deny_set_mask, 0}));

  // Since setting |deny_set_mask| is illegal, allocation without those bits
  // should succeed.
  auto status = producer_queue_->AllocateBuffer(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage & ~deny_set_mask);
  ASSERT_TRUE(status.ok());

  // Allocation with those bits, however, should fail.
  status = producer_queue_->AllocateBuffer(kBufferWidth, kBufferHeight,
                                           kBufferLayerCount, kBufferFormat,
                                           kBufferUsage | deny_set_mask);
  ASSERT_FALSE(status.ok());
  ASSERT_EQ(EINVAL, status.error());
}

TEST_F(BufferHubQueueTest, TestUsageDenyClearMask) {
  const uint32_t deny_clear_mask = GRALLOC_USAGE_SW_WRITE_OFTEN;
  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<int64_t>().Build(),
                           UsagePolicy{0, 0, 0, deny_clear_mask}));

  // Since clearing |deny_clear_mask| is illegal (i.e. setting these bits is
  // mandatory), allocation with those bits should succeed.
  auto status = producer_queue_->AllocateBuffer(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage | deny_clear_mask);
  ASSERT_TRUE(status.ok());

  // Allocation without those bits, however, should fail.
  status = producer_queue_->AllocateBuffer(kBufferWidth, kBufferHeight,
                                           kBufferLayerCount, kBufferFormat,
                                           kBufferUsage & ~deny_clear_mask);
  ASSERT_FALSE(status.ok());
  ASSERT_EQ(EINVAL, status.error());
}

TEST_F(BufferHubQueueTest, TestQueueInfo) {
  static const bool kIsAsync = true;
  ASSERT_TRUE(CreateQueues(config_builder_.SetIsAsync(kIsAsync)
                               .SetDefaultWidth(kBufferWidth)
                               .SetDefaultHeight(kBufferHeight)
                               .SetDefaultFormat(kBufferFormat)
                               .Build(),
                           UsagePolicy{}));

  EXPECT_EQ(producer_queue_->default_width(), kBufferWidth);
  EXPECT_EQ(producer_queue_->default_height(), kBufferHeight);
  EXPECT_EQ(producer_queue_->default_format(), kBufferFormat);
  EXPECT_EQ(producer_queue_->is_async(), kIsAsync);

  EXPECT_EQ(consumer_queue_->default_width(), kBufferWidth);
  EXPECT_EQ(consumer_queue_->default_height(), kBufferHeight);
  EXPECT_EQ(consumer_queue_->default_format(), kBufferFormat);
  EXPECT_EQ(consumer_queue_->is_async(), kIsAsync);
}

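// FreeAllBuffers() should succeed regardless of the buffers' current state
// (available, gained, posted, or acquired). The helper macro below verifies
// both queues are empty and then allocates a fresh set of buffers.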
TEST_F(BufferHubQueueTest, TestFreeAllBuffers) {
  constexpr size_t kBufferCount = 2;

#define CHECK_NO_BUFFER_THEN_ALLOCATE(num_buffers)  \
  EXPECT_EQ(consumer_queue_->count(), 0U);          \
  EXPECT_EQ(consumer_queue_->capacity(), 0U);       \
  EXPECT_EQ(producer_queue_->count(), 0U);          \
  EXPECT_EQ(producer_queue_->capacity(), 0U);       \
  for (size_t i = 0; i < num_buffers; i++) {        \
    AllocateBuffer();                               \
  }                                                 \
  EXPECT_EQ(producer_queue_->count(), num_buffers); \
  EXPECT_EQ(producer_queue_->capacity(), num_buffers);

  size_t slot;
  LocalHandle fence;
  pdx::Status<void> status;
  pdx::Status<std::shared_ptr<ConsumerBuffer>> consumer_status;
  pdx::Status<std::shared_ptr<ProducerBuffer>> producer_status;
  std::shared_ptr<ConsumerBuffer> consumer_buffer;
  std::shared_ptr<ProducerBuffer> producer_buffer;
  DvrNativeBufferMetadata mi, mo;

  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));

  // Free all buffers when buffers are available for dequeue.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // Free all buffers when one buffer is dequeued.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  ASSERT_TRUE(producer_status.ok());
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // Free all buffers when all buffers are dequeued.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  for (size_t i = 0; i < kBufferCount; i++) {
    producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    ASSERT_TRUE(producer_status.ok());
  }
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // Free all buffers when one buffer is posted.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  ASSERT_TRUE(producer_status.ok());
  producer_buffer = producer_status.take();
  ASSERT_NE(nullptr, producer_buffer);
  ASSERT_EQ(0, producer_buffer->PostAsync(&mi, fence));
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // Free all buffers when all buffers are posted.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  for (size_t i = 0; i < kBufferCount; i++) {
    producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    ASSERT_TRUE(producer_status.ok());
    producer_buffer = producer_status.take();
    ASSERT_NE(producer_buffer, nullptr);
    ASSERT_EQ(producer_buffer->PostAsync(&mi, fence), 0);
  }
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // Free all buffers when all buffers are acquired.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  for (size_t i = 0; i < kBufferCount; i++) {
    producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    ASSERT_TRUE(producer_status.ok());
    producer_buffer = producer_status.take();
    ASSERT_NE(producer_buffer, nullptr);
    ASSERT_EQ(producer_buffer->PostAsync(&mi, fence), 0);
    consumer_status = consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    ASSERT_TRUE(consumer_status.ok()) << consumer_status.GetErrorMessage();
  }

  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // In addition to FreeAllBuffers() on the queue, all references to the
  // ProducerBuffer (i.e. the PDX client) must also be dropped.
  producer_buffer = nullptr;

  // Crank consumer queue events to pick up EPOLLHUP events on the queue.
  consumer_queue_->HandleQueueEvents();

  // One last check.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);

#undef CHECK_NO_BUFFER_THEN_ALLOCATE
}

TEST_F(BufferHubQueueTest, TestProducerToParcelableNotEmpty) {
  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<uint64_t>().Build(),
                           UsagePolicy{}));

  // Allocate only one buffer.
  AllocateBuffer();

  // Export should fail as the queue is not empty.
  auto status = producer_queue_->TakeAsParcelable();
  EXPECT_FALSE(status.ok());
}

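// An empty producer queue can be exported to a ProducerQueueParcelable,
// written to a Parcel, read back, and re-imported; the recreated queue should
// be fully functional while the original handle can no longer allocate.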
TEST_F(BufferHubQueueTest, TestProducerExportToParcelable) {
  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));

  auto s1 = producer_queue_->TakeAsParcelable();
  EXPECT_TRUE(s1.ok());

  ProducerQueueParcelable output_parcelable = s1.take();
  EXPECT_TRUE(output_parcelable.IsValid());

  Parcel parcel;
  status_t res;
  res = output_parcelable.writeToParcel(&parcel);
  EXPECT_EQ(res, OK);

  // After being written to the parcel, output_parcelable is still valid and
  // keeps the producer channel alive.
  EXPECT_TRUE(output_parcelable.IsValid());

  // Creating a producer buffer should fail.
  auto s2 = producer_queue_->AllocateBuffer(kBufferWidth, kBufferHeight,
                                            kBufferLayerCount, kBufferFormat,
                                            kBufferUsage);
  ASSERT_FALSE(s2.ok());

  // Reset the data position so that we can read back from the same parcel
  // without doing actual Binder IPC.
  parcel.setDataPosition(0);
  producer_queue_ = nullptr;

  // Recreate the producer queue from the parcel.
  ProducerQueueParcelable input_parcelable;
  EXPECT_FALSE(input_parcelable.IsValid());

  res = input_parcelable.readFromParcel(&parcel);
  EXPECT_EQ(res, OK);
  EXPECT_TRUE(input_parcelable.IsValid());

  EXPECT_EQ(producer_queue_, nullptr);
  producer_queue_ = ProducerQueue::Import(input_parcelable.TakeChannelHandle());
  EXPECT_FALSE(input_parcelable.IsValid());
  ASSERT_NE(producer_queue_, nullptr);

  // The queue recreated from the parcel can allocate a buffer and post it to
  // the consumer.
  EXPECT_NO_FATAL_FAILURE(AllocateBuffer());
  EXPECT_EQ(producer_queue_->count(), 1U);
  EXPECT_EQ(producer_queue_->capacity(), 1U);

  size_t slot;
  DvrNativeBufferMetadata producer_meta;
  DvrNativeBufferMetadata consumer_meta;
  LocalHandle fence;
  auto s3 = producer_queue_->Dequeue(0, &slot, &producer_meta, &fence);
  EXPECT_TRUE(s3.ok());

  std::shared_ptr<ProducerBuffer> p1 = s3.take();
  ASSERT_NE(p1, nullptr);

  producer_meta.timestamp = 42;
  EXPECT_EQ(p1->PostAsync(&producer_meta, LocalHandle()), 0);

  // Make sure the buffer can be dequeued from the consumer side.
  auto s4 = consumer_queue_->Dequeue(kTimeoutMs, &slot, &consumer_meta, &fence);
  EXPECT_TRUE(s4.ok()) << s4.GetErrorMessage();
  EXPECT_EQ(consumer_queue_->capacity(), 1U);

  auto consumer = s4.take();
  ASSERT_NE(consumer, nullptr);
  EXPECT_EQ(producer_meta.timestamp, consumer_meta.timestamp);
}

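// A consumer queue parcelable exported from the producer can only be read back
// as a ConsumerQueueParcelable (reading it as a producer parcelable fails with
// -EINVAL), and the imported consumer queue receives buffers posted by the
// producer.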
TEST_F(BufferHubQueueTest, TestCreateConsumerParcelable) {
  ASSERT_TRUE(CreateProducerQueue(config_builder_.Build(), UsagePolicy{}));

  auto s1 = producer_queue_->CreateConsumerQueueParcelable();
  EXPECT_TRUE(s1.ok());
  ConsumerQueueParcelable output_parcelable = s1.take();
  EXPECT_TRUE(output_parcelable.IsValid());

  // Write to a new Parcel object.
  Parcel parcel;
  status_t res;
  res = output_parcelable.writeToParcel(&parcel);

  // Reset the data position so that we can read back from the same parcel
  // without doing actual Binder IPC.
  parcel.setDataPosition(0);

  // No consumer queue has been created yet.
  EXPECT_EQ(consumer_queue_, nullptr);

  // If the parcel contains a consumer queue, reading it into a
  // ProducerQueueParcelable should fail.
  ProducerQueueParcelable wrongly_typed_parcelable;
  EXPECT_FALSE(wrongly_typed_parcelable.IsValid());
  res = wrongly_typed_parcelable.readFromParcel(&parcel);
  EXPECT_EQ(res, -EINVAL);
  parcel.setDataPosition(0);

  // Create the consumer queue from the parcel.
  ConsumerQueueParcelable input_parcelable;
  EXPECT_FALSE(input_parcelable.IsValid());

  res = input_parcelable.readFromParcel(&parcel);
  EXPECT_EQ(res, OK);
  EXPECT_TRUE(input_parcelable.IsValid());

  consumer_queue_ = ConsumerQueue::Import(input_parcelable.TakeChannelHandle());
  EXPECT_FALSE(input_parcelable.IsValid());
  ASSERT_NE(consumer_queue_, nullptr);

  EXPECT_NO_FATAL_FAILURE(AllocateBuffer());
  EXPECT_EQ(producer_queue_->count(), 1U);
  EXPECT_EQ(producer_queue_->capacity(), 1U);

  size_t slot;
  DvrNativeBufferMetadata producer_meta;
  DvrNativeBufferMetadata consumer_meta;
  LocalHandle fence;
  auto s2 = producer_queue_->Dequeue(0, &slot, &producer_meta, &fence);
  EXPECT_TRUE(s2.ok());

  std::shared_ptr<ProducerBuffer> p1 = s2.take();
  ASSERT_NE(p1, nullptr);

  producer_meta.timestamp = 42;
  EXPECT_EQ(p1->PostAsync(&producer_meta, LocalHandle()), 0);

  // Make sure the buffer can be dequeued from the consumer side.
  auto s3 = consumer_queue_->Dequeue(kTimeoutMs, &slot, &consumer_meta, &fence);
  EXPECT_TRUE(s3.ok()) << s3.GetErrorMessage();
  EXPECT_EQ(consumer_queue_->capacity(), 1U);

  auto consumer = s3.take();
  ASSERT_NE(consumer, nullptr);
  EXPECT_EQ(producer_meta.timestamp, consumer_meta.timestamp);
}

}  // namespace

}  // namespace dvr
}  // namespace android