#include <base/logging.h>
#include <private/dvr/buffer_hub_client.h>
#include <private/dvr/buffer_hub_queue_client.h>

#include <gtest/gtest.h>
#include <poll.h>
#include <sys/eventfd.h>

#include <vector>

// Enable/disable debug logging.
#define TRACE 0

namespace android {
namespace dvr {

using pdx::LocalHandle;

namespace {

// Default geometry/format/usage for test buffers. A 100x1 BLOB buffer with
// rare SW reads keeps allocations cheap for the tests below.
constexpr uint32_t kBufferWidth = 100;
constexpr uint32_t kBufferHeight = 1;
constexpr uint32_t kBufferLayerCount = 1;
constexpr uint32_t kBufferFormat = HAL_PIXEL_FORMAT_BLOB;
constexpr uint64_t kBufferUsage = GRALLOC_USAGE_SW_READ_RARELY;

// Test fixture providing helpers to build a producer queue, attach consumer
// queues to it, allocate buffers into the producer queue, and pump queue
// events.
class BufferHubQueueTest : public ::testing::Test {
 public:
  // Creates the producer queue with the given config and usage policy.
  // Returns true on success.
  bool CreateProducerQueue(const ProducerQueueConfig& config,
                           const UsagePolicy& usage) {
    producer_queue_ = ProducerQueue::Create(config, usage);
    return producer_queue_ != nullptr;
  }

  // Creates a consumer queue attached to the already-created producer queue.
  // Returns false if the producer queue does not exist yet.
  bool CreateConsumerQueue() {
    if (producer_queue_) {
      consumer_queue_ = producer_queue_->CreateConsumerQueue();
      return consumer_queue_ != nullptr;
    } else {
      return false;
    }
  }

  // Convenience helper: creates the producer queue and one consumer queue.
  bool CreateQueues(const ProducerQueueConfig& config,
                    const UsagePolicy& usage) {
    return CreateProducerQueue(config, usage) && CreateConsumerQueue();
  }

  // Allocates one producer buffer with the default test parameters.
  // Optionally reports the slot the buffer was placed in via |slot_out|.
  void AllocateBuffer(size_t* slot_out = nullptr) {
    // Create producer buffer.
    auto status = producer_queue_->AllocateBuffer(kBufferWidth, kBufferHeight,
                                                  kBufferLayerCount,
                                                  kBufferFormat, kBufferUsage);

    ASSERT_TRUE(status.ok());
    size_t slot = status.take();
    if (slot_out)
      *slot_out = slot;
  }

  // Polls |queue|'s event fd for up to |timeout_ms| and, if it becomes
  // readable, dispatches queue events once. Returns the result of
  // HandleQueueEvents(), or false on poll timeout/error. EINTR is retried.
  bool WaitAndHandleOnce(BufferHubQueue* queue, int timeout_ms) {
    pollfd pfd{queue->queue_fd(), POLLIN, 0};
    int ret;
    do {
      ret = poll(&pfd, 1, timeout_ms);
    } while (ret == -1 && errno == EINTR);

    if (ret < 0) {
      ALOGW("Failed to poll queue %d's event fd, error: %s.", queue->id(),
            strerror(errno));
      return false;
    } else if (ret == 0) {
      // Timed out with no event.
      return false;
    }
    return queue->HandleQueueEvents();
  }

 protected:
  ProducerQueueConfigBuilder config_builder_;
  std::unique_ptr<ProducerQueue> producer_queue_;
  std::unique_ptr<ConsumerQueue> consumer_queue_;
};

// A single buffer can be dequeued, posted, consumed, and released repeatedly:
// the producer/consumer cycle recycles the same buffer each iteration.
TEST_F(BufferHubQueueTest, TestDequeue) {
  const size_t nb_dequeue_times = 16;

  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<size_t>().Build(),
                           UsagePolicy{}));

  // Allocate only one buffer.
  AllocateBuffer();

  // But dequeue multiple times.
  for (size_t i = 0; i < nb_dequeue_times; i++) {
    size_t slot;
    LocalHandle fence;
    auto p1_status = producer_queue_->Dequeue(100, &slot, &fence);
    ASSERT_TRUE(p1_status.ok());
    auto p1 = p1_status.take();
    ASSERT_NE(nullptr, p1);
    // Post with the iteration index as metadata and verify the consumer
    // receives the same value.
    size_t mi = i;
    ASSERT_EQ(p1->Post(LocalHandle(), &mi, sizeof(mi)), 0);
    size_t mo;
    auto c1_status = consumer_queue_->Dequeue(100, &slot, &mo, &fence);
    ASSERT_TRUE(c1_status.ok());
    auto c1 = c1_status.take();
    ASSERT_NE(nullptr, c1);
    ASSERT_EQ(mi, mo);
    // Release so the buffer cycles back to the producer queue.
    c1->Release(LocalHandle());
  }
}

// Verifies count/capacity bookkeeping on both queue ends as buffers are
// allocated, lazily imported by the consumer, posted, and acquired.
TEST_F(BufferHubQueueTest, TestProducerConsumer) {
  const size_t kBufferCount = 16;
  size_t slot;
  uint64_t seq;

  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<uint64_t>().Build(),
                           UsagePolicy{}));

  for (size_t i = 0; i < kBufferCount; i++) {
    AllocateBuffer();

    // Producer queue has all the available buffers on initialize.
    ASSERT_EQ(producer_queue_->count(), i + 1);
    ASSERT_EQ(producer_queue_->capacity(), i + 1);

    // Consumer queue has no available buffer on initialize.
    ASSERT_EQ(consumer_queue_->count(), 0U);
    // Consumer queue does not import buffers until a dequeue is issued.
    ASSERT_EQ(consumer_queue_->capacity(), i);
    // Dequeue returns timeout since no buffer is ready for the consumer, but
    // this implicitly triggers buffer import and bumps up |capacity|.
    LocalHandle fence;
    auto status = consumer_queue_->Dequeue(100, &slot, &seq, &fence);
    ASSERT_FALSE(status.ok());
    ASSERT_EQ(ETIMEDOUT, status.error());
    ASSERT_EQ(consumer_queue_->capacity(), i + 1);
  }

  // Use eventfd as a stand-in for a fence.
  LocalHandle post_fence(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK));

  for (size_t i = 0; i < kBufferCount; i++) {
    LocalHandle fence;

    // First time there is no buffer available to dequeue.
    auto consumer_status = consumer_queue_->Dequeue(100, &slot, &seq, &fence);
    ASSERT_FALSE(consumer_status.ok());
    ASSERT_EQ(ETIMEDOUT, consumer_status.error());

    // Make sure the producer buffer is POSTED so that it's ready to Acquire
    // in the consumer's Dequeue() function.
    auto producer_status = producer_queue_->Dequeue(100, &slot, &fence);
    ASSERT_TRUE(producer_status.ok());
    auto producer = producer_status.take();
    ASSERT_NE(nullptr, producer);

    uint64_t seq_in = static_cast<uint64_t>(i);
    ASSERT_EQ(producer->Post(post_fence, &seq_in, sizeof(seq_in)), 0);

    // Second time, just the POSTED buffer should be dequeued.
    uint64_t seq_out = 0;
    consumer_status = consumer_queue_->Dequeue(100, &slot, &seq_out, &fence);
    ASSERT_TRUE(consumer_status.ok());
    EXPECT_TRUE(fence.IsValid());

    auto consumer = consumer_status.take();
    ASSERT_NE(nullptr, consumer);
    // Metadata round-trips through the post/acquire path.
    ASSERT_EQ(seq_in, seq_out);
  }
}

// Verifies that removing a buffer from the producer queue propagates to the
// consumer queue only after the buffer itself is released (POLLHUP), and
// that freed slots are reused by subsequent allocations.
TEST_F(BufferHubQueueTest, TestRemoveBuffer) {
  ASSERT_TRUE(CreateProducerQueue(config_builder_.Build(), UsagePolicy{}));

  // Allocate buffers.
  const size_t kBufferCount = 4u;
  for (size_t i = 0; i < kBufferCount; i++) {
    AllocateBuffer();
  }
  ASSERT_EQ(kBufferCount, producer_queue_->count());
  ASSERT_EQ(kBufferCount, producer_queue_->capacity());

  consumer_queue_ = producer_queue_->CreateConsumerQueue();
  ASSERT_NE(nullptr, consumer_queue_);

  // Check that buffers are correctly imported on construction.
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  EXPECT_EQ(0u, consumer_queue_->count());

  // Dequeue all the buffers and keep track of them in an array. This prevents
  // the producer queue ring buffer ref counts from interfering with the tests.
  struct Entry {
    std::shared_ptr<BufferProducer> buffer;  // keeps the buffer alive
    LocalHandle fence;
    size_t slot;  // slot the buffer occupies in the queue
  };
  std::array<Entry, kBufferCount> buffers;

  for (size_t i = 0; i < kBufferCount; i++) {
    Entry* entry = &buffers[i];
    auto producer_status = producer_queue_->Dequeue(
        /*timeout_ms=*/100, &entry->slot, &entry->fence);
    ASSERT_TRUE(producer_status.ok());
    entry->buffer = producer_status.take();
    ASSERT_NE(nullptr, entry->buffer);
  }

  // Remove a buffer and make sure both queues reflect the change.
  ASSERT_TRUE(producer_queue_->RemoveBuffer(buffers[0].slot));
  EXPECT_EQ(kBufferCount - 1, producer_queue_->capacity());

  // As long as the removed buffer is still alive the consumer queue won't know
  // it's gone.
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  EXPECT_FALSE(consumer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());

  // Release the removed buffer.
  buffers[0].buffer = nullptr;

  // Now the consumer queue should know it's gone.
  EXPECT_FALSE(WaitAndHandleOnce(consumer_queue_.get(), /*timeout_ms=*/100));
  ASSERT_EQ(kBufferCount - 1, consumer_queue_->capacity());

  // Allocate a new buffer. This should take the first empty slot.
  size_t slot;
  AllocateBuffer(&slot);
  ALOGE_IF(TRACE, "ALLOCATE %zu", slot);
  EXPECT_EQ(buffers[0].slot, slot);
  EXPECT_EQ(kBufferCount, producer_queue_->capacity());

  // The consumer queue should pick up the new buffer.
  EXPECT_EQ(kBufferCount - 1, consumer_queue_->capacity());
  EXPECT_FALSE(consumer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());

  // Remove and allocate a buffer.
  ASSERT_TRUE(producer_queue_->RemoveBuffer(buffers[1].slot));
  EXPECT_EQ(kBufferCount - 1, producer_queue_->capacity());
  buffers[1].buffer = nullptr;

  AllocateBuffer(&slot);
  ALOGE_IF(TRACE, "ALLOCATE %zu", slot);
  EXPECT_EQ(buffers[1].slot, slot);
  EXPECT_EQ(kBufferCount, producer_queue_->capacity());

  // The consumer queue should pick up the new buffer but the count shouldn't
  // change.
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  EXPECT_FALSE(consumer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());

  // Remove and allocate a buffer, but don't free the buffer right away.
  ASSERT_TRUE(producer_queue_->RemoveBuffer(buffers[2].slot));
  EXPECT_EQ(kBufferCount - 1, producer_queue_->capacity());

  AllocateBuffer(&slot);
  ALOGE_IF(TRACE, "ALLOCATE %zu", slot);
  EXPECT_EQ(buffers[2].slot, slot);
  EXPECT_EQ(kBufferCount, producer_queue_->capacity());

  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  EXPECT_FALSE(consumer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());

  // Release the producer buffer to trigger a POLLHUP event for an already
  // removed buffer.
  buffers[2].buffer = nullptr;
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  EXPECT_FALSE(consumer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
}

// Exercises multi-consumer behavior: a "silent" consumer queue that ignores
// buffers, plus a regular consumer queue derived from it.
TEST_F(BufferHubQueueTest, TestMultipleConsumers) {
  // ProducerConfigureBuilder doesn't set Metadata{size}, which means there
  // is no metadata associated with this BufferQueue's buffer.
  ASSERT_TRUE(CreateProducerQueue(config_builder_.Build(), UsagePolicy{}));

  // Allocate buffers.
  const size_t kBufferCount = 4u;
  for (size_t i = 0; i < kBufferCount; i++) {
    AllocateBuffer();
  }
  ASSERT_EQ(kBufferCount, producer_queue_->count());

  // Build a silent consumer queue to test multi-consumer queue features.
  auto silent_queue = producer_queue_->CreateSilentConsumerQueue();
  ASSERT_NE(nullptr, silent_queue);

  // Check that the silent queue doesn't import buffers on creation.
  EXPECT_EQ(0, silent_queue->capacity());

  // Dequeue and post a buffer.
  size_t slot;
  LocalHandle fence;
  auto producer_status =
      producer_queue_->Dequeue(/*timeout_ms=*/100, &slot, &fence);
  ASSERT_TRUE(producer_status.ok());
  auto producer_buffer = producer_status.take();
  ASSERT_NE(nullptr, producer_buffer);
  ASSERT_EQ(0, producer_buffer->Post<void>({}));
  // After post, check the number of remaining available buffers.
  EXPECT_EQ(kBufferCount - 1, producer_queue_->count());

  // Currently we expect no buffer to be available prior to calling
  // WaitForBuffers/HandleQueueEvents.
  // TODO(eieio): Note this behavior may change in the future.
  EXPECT_EQ(0u, silent_queue->count());
  EXPECT_FALSE(silent_queue->HandleQueueEvents());
  EXPECT_EQ(0u, silent_queue->count());

  // Build a new consumer queue to test multi-consumer queue features.
  consumer_queue_ = silent_queue->CreateConsumerQueue();
  ASSERT_NE(nullptr, consumer_queue_);

  // Check that buffers are correctly imported on construction.
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  // The posted buffer is visible to the new (non-silent) consumer.
  EXPECT_EQ(1u, consumer_queue_->count());

  // Reclaim released/ignored buffers.
  ASSERT_EQ(kBufferCount - 1, producer_queue_->count());

  usleep(10000);
  WaitAndHandleOnce(producer_queue_.get(), /*timeout_ms=*/100);
  ASSERT_EQ(kBufferCount - 1, producer_queue_->count());

  // Post another buffer.
  producer_status = producer_queue_->Dequeue(/*timeout_ms=*/100, &slot, &fence);
  ASSERT_TRUE(producer_status.ok());
  producer_buffer = producer_status.take();
  ASSERT_NE(nullptr, producer_buffer);
  ASSERT_EQ(0, producer_buffer->Post<void>({}));

  // Verify that the consumer queue receives it.
  size_t consumer_queue_count = consumer_queue_->count();
  WaitAndHandleOnce(consumer_queue_.get(), /*timeout_ms=*/100);
  EXPECT_LT(consumer_queue_count, consumer_queue_->count());

  // Save the current consumer queue buffer count to compare after the dequeue.
  consumer_queue_count = consumer_queue_->count();

  // Dequeue and acquire/release (discard) buffers on the consumer end.
  auto consumer_status =
      consumer_queue_->Dequeue(/*timeout_ms=*/100, &slot, &fence);
  ASSERT_TRUE(consumer_status.ok());
  auto consumer_buffer = consumer_status.take();
  ASSERT_NE(nullptr, consumer_buffer);
  consumer_buffer->Discard();

  // Buffer should be returned to the producer queue without being handled by
  // the silent consumer queue.
  EXPECT_GT(consumer_queue_count, consumer_queue_->count());
  EXPECT_EQ(kBufferCount - 2, producer_queue_->count());
  EXPECT_TRUE(producer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount - 1, producer_queue_->count());
}

// Sample struct used to verify that arbitrary POD metadata round-trips
// through Post/Dequeue.
struct TestMetadata {
  char a;
  int32_t b;
  int64_t c;
};

// Posts several TestMetadata values and checks each field survives the
// producer-to-consumer round trip intact.
TEST_F(BufferHubQueueTest, TestMetadata) {
  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<TestMetadata>().Build(),
                           UsagePolicy{}));

  AllocateBuffer();

  std::vector<TestMetadata> ms = {
      {'0', 0, 0}, {'1', 10, 3333}, {'@', 123, 1000000000}};

  for (auto mi : ms) {
    size_t slot;
    LocalHandle fence;
    auto p1_status = producer_queue_->Dequeue(100, &slot, &fence);
    ASSERT_TRUE(p1_status.ok());
    auto p1 = p1_status.take();
    ASSERT_NE(nullptr, p1);
    ASSERT_EQ(p1->Post(LocalHandle(-1), &mi, sizeof(mi)), 0);
    TestMetadata mo;
    auto c1_status = consumer_queue_->Dequeue(100, &slot, &mo, &fence);
    ASSERT_TRUE(c1_status.ok());
    auto c1 = c1_status.take();
    ASSERT_EQ(mi.a, mo.a);
    ASSERT_EQ(mi.b, mo.b);
    ASSERT_EQ(mi.c, mo.c);
    c1->Release(LocalHandle(-1));
  }
}

// Dequeuing with a metadata type of a different size than the one posted
// must fail.
TEST_F(BufferHubQueueTest, TestMetadataMismatch) {
  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<int64_t>().Build(),
                           UsagePolicy{}));

  AllocateBuffer();

  int64_t mi = 3;
  size_t slot;
  LocalHandle fence;
  auto p1_status = producer_queue_->Dequeue(100, &slot, &fence);
  ASSERT_TRUE(p1_status.ok());
  auto p1 = p1_status.take();
  ASSERT_NE(nullptr, p1);
  ASSERT_EQ(p1->Post(LocalHandle(-1), &mi, sizeof(mi)), 0);

  // Acquiring a buffer with mismatched metadata (int32_t vs int64_t) is not
  // OK.
  int32_t mo;
  auto c1_status = consumer_queue_->Dequeue(100, &slot, &mo, &fence);
  ASSERT_FALSE(c1_status.ok());
}

// Re-enqueueing a dequeued producer buffer (instead of posting it) must not
// make it visible to the consumer.
TEST_F(BufferHubQueueTest, TestEnqueue) {
  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<int64_t>().Build(),
                           UsagePolicy{}));
  AllocateBuffer();

  size_t slot;
  LocalHandle fence;
  auto p1_status = producer_queue_->Dequeue(100, &slot, &fence);
  ASSERT_TRUE(p1_status.ok());
  auto p1 = p1_status.take();
  ASSERT_NE(nullptr, p1);

  int64_t mo;
  producer_queue_->Enqueue(p1, slot, 0ULL);
  // The enqueued (not posted) buffer should not be dequeuable by the
  // consumer.
  auto c1_status = consumer_queue_->Dequeue(100, &slot, &mo, &fence);
  ASSERT_FALSE(c1_status.ok());
}

// Buffers can be allocated dynamically after the queue is exhausted, and the
// consumer imports them lazily.
TEST_F(BufferHubQueueTest, TestAllocateBuffer) {
  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<int64_t>().Build(),
                           UsagePolicy{}));

  size_t s1;
  AllocateBuffer();
  LocalHandle fence;
  auto p1_status = producer_queue_->Dequeue(100, &s1, &fence);
  ASSERT_TRUE(p1_status.ok());
  auto p1 = p1_status.take();
  ASSERT_NE(nullptr, p1);

  // Producer queue is exhausted.
  size_t s2;
  auto p2_status = producer_queue_->Dequeue(100, &s2, &fence);
  ASSERT_FALSE(p2_status.ok());
  ASSERT_EQ(ETIMEDOUT, p2_status.error());

  // Dynamically add a buffer.
  AllocateBuffer();
  ASSERT_EQ(producer_queue_->count(), 1U);
  ASSERT_EQ(producer_queue_->capacity(), 2U);

  // Now we can dequeue again.
  p2_status = producer_queue_->Dequeue(100, &s2, &fence);
  ASSERT_TRUE(p2_status.ok());
  auto p2 = p2_status.take();
  ASSERT_NE(nullptr, p2);
  ASSERT_EQ(producer_queue_->count(), 0U);
  // p1 and p2 should have different slot numbers.
  ASSERT_NE(s1, s2);

  // Consumer queue does not import buffers until |Dequeue| or |ImportBuffers|
  // are called. So far consumer_queue_ should be empty.
  ASSERT_EQ(consumer_queue_->count(), 0U);

  int64_t seq = 1;
  ASSERT_EQ(p1->Post(LocalHandle(), seq), 0);
  size_t cs1, cs2;
  auto c1_status = consumer_queue_->Dequeue(100, &cs1, &seq, &fence);
  ASSERT_TRUE(c1_status.ok());
  auto c1 = c1_status.take();
  ASSERT_NE(nullptr, c1);
  ASSERT_EQ(consumer_queue_->count(), 0U);
  // The dequeue triggered import of both buffers.
  ASSERT_EQ(consumer_queue_->capacity(), 2U);
  ASSERT_EQ(cs1, s1);

  ASSERT_EQ(p2->Post(LocalHandle(), seq), 0);
  auto c2_status = consumer_queue_->Dequeue(100, &cs2, &seq, &fence);
  ASSERT_TRUE(c2_status.ok());
  auto c2 = c2_status.take();
  ASSERT_NE(nullptr, c2);
  ASSERT_EQ(cs2, s2);
}

// UsagePolicy |set_mask|: bits in the mask are forced on even if the
// allocation request leaves them out.
TEST_F(BufferHubQueueTest, TestUsageSetMask) {
  const uint32_t set_mask = GRALLOC_USAGE_SW_WRITE_OFTEN;
  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<int64_t>().Build(),
                           UsagePolicy{set_mask, 0, 0, 0}));

  // At allocation, deliberately leave out |set_mask| from the usage bits.
  auto status = producer_queue_->AllocateBuffer(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage & ~set_mask);
  ASSERT_TRUE(status.ok());

  LocalHandle fence;
  size_t slot;
  auto p1_status = producer_queue_->Dequeue(100, &slot, &fence);
  ASSERT_TRUE(p1_status.ok());
  auto p1 = p1_status.take();
  // The policy must have forced |set_mask| on.
  ASSERT_EQ(p1->usage() & set_mask, set_mask);
}

// UsagePolicy |clear_mask|: bits in the mask are stripped even if the
// allocation request sets them.
TEST_F(BufferHubQueueTest, TestUsageClearMask) {
  const uint32_t clear_mask = GRALLOC_USAGE_SW_WRITE_OFTEN;
  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<int64_t>().Build(),
                           UsagePolicy{0, clear_mask, 0, 0}));

  // At allocation, deliberately add |clear_mask| into the usage bits.
  auto status = producer_queue_->AllocateBuffer(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage | clear_mask);
  ASSERT_TRUE(status.ok());

  LocalHandle fence;
  size_t slot;
  auto p1_status = producer_queue_->Dequeue(100, &slot, &fence);
  ASSERT_TRUE(p1_status.ok());
  auto p1 = p1_status.take();
  // The policy must have stripped |clear_mask|.
  ASSERT_EQ(0u, p1->usage() & clear_mask);
}

// UsagePolicy |deny_set_mask|: requesting any of these bits makes allocation
// fail with EINVAL.
TEST_F(BufferHubQueueTest, TestUsageDenySetMask) {
  const uint32_t deny_set_mask = GRALLOC_USAGE_SW_WRITE_OFTEN;
  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<int64_t>().Build(),
                           UsagePolicy{0, 0, deny_set_mask, 0}));

  // Now that |deny_set_mask| is illegal, allocation without those bits should
  // be able to succeed.
  auto status = producer_queue_->AllocateBuffer(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage & ~deny_set_mask);
  ASSERT_TRUE(status.ok());

  // While allocation with those bits should fail.
  status = producer_queue_->AllocateBuffer(kBufferWidth, kBufferHeight,
                                           kBufferLayerCount, kBufferFormat,
                                           kBufferUsage | deny_set_mask);
  ASSERT_FALSE(status.ok());
  ASSERT_EQ(EINVAL, status.error());
}

// UsagePolicy |deny_clear_mask|: omitting any of these bits (i.e. setting
// them is mandatory) makes allocation fail with EINVAL.
TEST_F(BufferHubQueueTest, TestUsageDenyClearMask) {
  const uint32_t deny_clear_mask = GRALLOC_USAGE_SW_WRITE_OFTEN;
  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<int64_t>().Build(),
                           UsagePolicy{0, 0, 0, deny_clear_mask}));

  // Now that clearing |deny_clear_mask| is illegal (i.e. setting these bits is
  // mandatory), allocation with those bits should be able to succeed.
  auto status = producer_queue_->AllocateBuffer(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage | deny_clear_mask);
  ASSERT_TRUE(status.ok());

  // While allocation without those bits should fail.
  status = producer_queue_->AllocateBuffer(kBufferWidth, kBufferHeight,
                                           kBufferLayerCount, kBufferFormat,
                                           kBufferUsage & ~deny_clear_mask);
  ASSERT_FALSE(status.ok());
  ASSERT_EQ(EINVAL, status.error());
}

// Queue-level defaults (width/height/format/async flag) configured on the
// producer must be mirrored on the consumer.
TEST_F(BufferHubQueueTest, TestQueueInfo) {
  static const bool kIsAsync = true;
  ASSERT_TRUE(CreateQueues(config_builder_.SetIsAsync(kIsAsync)
                               .SetDefaultWidth(kBufferWidth)
                               .SetDefaultHeight(kBufferHeight)
                               .SetDefaultFormat(kBufferFormat)
                               .Build(),
                           UsagePolicy{}));

  EXPECT_EQ(producer_queue_->default_width(), kBufferWidth);
  EXPECT_EQ(producer_queue_->default_height(), kBufferHeight);
  EXPECT_EQ(producer_queue_->default_format(), kBufferFormat);
  EXPECT_EQ(producer_queue_->is_async(), kIsAsync);

  EXPECT_EQ(consumer_queue_->default_width(), kBufferWidth);
  EXPECT_EQ(consumer_queue_->default_height(), kBufferHeight);
  EXPECT_EQ(consumer_queue_->default_format(), kBufferFormat);
  EXPECT_EQ(consumer_queue_->is_async(), kIsAsync);
}

// FreeAllBuffers must succeed regardless of the buffers' states: available,
// dequeued, posted, or acquired.
TEST_F(BufferHubQueueTest, TestFreeAllBuffers) {
  constexpr size_t kBufferCount = 2;

// Asserts both queues are empty, then allocates |num_buffers| fresh buffers
// into the producer queue and checks the bookkeeping.
#define CHECK_NO_BUFFER_THEN_ALLOCATE(num_buffers)  \
  EXPECT_EQ(consumer_queue_->count(), 0U);          \
  EXPECT_EQ(consumer_queue_->capacity(), 0U);       \
  EXPECT_EQ(producer_queue_->count(), 0U);          \
  EXPECT_EQ(producer_queue_->capacity(), 0U);       \
  for (size_t i = 0; i < num_buffers; i++) {        \
    AllocateBuffer();                               \
  }                                                 \
  EXPECT_EQ(producer_queue_->count(), num_buffers); \
  EXPECT_EQ(producer_queue_->capacity(), num_buffers);

  size_t slot;
  uint64_t seq;
  LocalHandle fence;
  pdx::Status<void> status;
  pdx::Status<std::shared_ptr<BufferConsumer>> consumer_status;
  pdx::Status<std::shared_ptr<BufferProducer>> producer_status;
  std::shared_ptr<BufferConsumer> consumer_buffer;
  std::shared_ptr<BufferProducer> producer_buffer;

  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<uint64_t>().Build(),
                           UsagePolicy{}));

  // Free all buffers when buffers are available for dequeue.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // Free all buffers when one buffer is dequeued.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  producer_status = producer_queue_->Dequeue(100, &slot, &fence);
  ASSERT_TRUE(producer_status.ok());
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // Free all buffers when all buffers are dequeued.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  for (size_t i = 0; i < kBufferCount; i++) {
    producer_status = producer_queue_->Dequeue(100, &slot, &fence);
    ASSERT_TRUE(producer_status.ok());
  }
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // Free all buffers when one buffer is posted.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  producer_status = producer_queue_->Dequeue(100, &slot, &fence);
  ASSERT_TRUE(producer_status.ok());
  producer_buffer = producer_status.take();
  ASSERT_NE(nullptr, producer_buffer);
  ASSERT_EQ(0, producer_buffer->Post(fence, &seq, sizeof(seq)));
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // Free all buffers when all buffers are posted.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  for (size_t i = 0; i < kBufferCount; i++) {
    producer_status = producer_queue_->Dequeue(100, &slot, &fence);
    ASSERT_TRUE(producer_status.ok());
    producer_buffer = producer_status.take();
    ASSERT_NE(nullptr, producer_buffer);
    ASSERT_EQ(0, producer_buffer->Post(fence, &seq, sizeof(seq)));
  }
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // Free all buffers when all buffers are acquired.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  for (size_t i = 0; i < kBufferCount; i++) {
    producer_status = producer_queue_->Dequeue(100, &slot, &fence);
    ASSERT_TRUE(producer_status.ok());
    producer_buffer = producer_status.take();
    ASSERT_NE(nullptr, producer_buffer);
    ASSERT_EQ(0, producer_buffer->Post(fence, &seq, sizeof(seq)));
    consumer_status = consumer_queue_->Dequeue(100, &slot, &seq, &fence);
    ASSERT_TRUE(consumer_status.ok());
  }

  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // In addition to FreeAllBuffers() from the queue, it is also required to
  // delete all references to the ProducerBuffer (i.e. the PDX client).
  producer_buffer = nullptr;

  // Crank consumer queue events to pick up EPOLLHUP events on the queue.
  consumer_queue_->HandleQueueEvents();

  // One last check.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);

#undef CHECK_NO_BUFFER_THEN_ALLOCATE
}

}  // namespace

}  // namespace dvr
}  // namespace android