1 /* 2 * Copyright (C) 2016 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #ifndef HIDL_MQ_H 18 #define HIDL_MQ_H 19 20 #include <atomic> 21 #include <cutils/ashmem.h> 22 #include <fmq/EventFlag.h> 23 #include <hidl/MQDescriptor.h> 24 #include <new> 25 #include <sys/mman.h> 26 #include <utils/Log.h> 27 #include <utils/SystemClock.h> 28 29 namespace android { 30 namespace hardware { 31 32 namespace details { 33 void check(bool exp); 34 void logError(const std::string &message); 35 } // namespace details 36 37 template <typename T, MQFlavor flavor> 38 struct MessageQueue { 39 typedef MQDescriptor<T, flavor> Descriptor; 40 41 /** 42 * @param Desc MQDescriptor describing the FMQ. 43 * @param resetPointers bool indicating whether the read/write pointers 44 * should be reset or not. 45 */ 46 MessageQueue(const Descriptor& Desc, bool resetPointers = true); 47 48 ~MessageQueue(); 49 50 /** 51 * This constructor uses Ashmem shared memory to create an FMQ 52 * that can contain a maximum of 'numElementsInQueue' elements of type T. 53 * 54 * @param numElementsInQueue Capacity of the MessageQueue in terms of T. 55 * @param configureEventFlagWord Boolean that specifies if memory should 56 * also be allocated and mapped for an EventFlag word. 
57 */ 58 MessageQueue(size_t numElementsInQueue, bool configureEventFlagWord = false); 59 60 /** 61 * @return Number of items of type T that can be written into the FMQ 62 * without a read. 63 */ 64 size_t availableToWrite() const; 65 66 /** 67 * @return Number of items of type T that are waiting to be read from the 68 * FMQ. 69 */ 70 size_t availableToRead() const; 71 72 /** 73 * Returns the size of type T in bytes. 74 * 75 * @param Size of T. 76 */ 77 size_t getQuantumSize() const; 78 79 /** 80 * Returns the size of the FMQ in terms of the size of type T. 81 * 82 * @return Number of items of type T that will fit in the FMQ. 83 */ 84 size_t getQuantumCount() const; 85 86 /** 87 * @return Whether the FMQ is configured correctly. 88 */ 89 bool isValid() const; 90 91 /** 92 * Non-blocking write to FMQ. 93 * 94 * @param data Pointer to the object of type T to be written into the FMQ. 95 * 96 * @return Whether the write was successful. 97 */ 98 bool write(const T* data); 99 100 /** 101 * Non-blocking read from FMQ. 102 * 103 * @param data Pointer to the memory where the object read from the FMQ is 104 * copied to. 105 * 106 * @return Whether the read was successful. 107 */ 108 bool read(T* data); 109 110 /** 111 * Write some data into the FMQ without blocking. 112 * 113 * @param data Pointer to the array of items of type T. 114 * @param count Number of items in array. 115 * 116 * @return Whether the write was successful. 117 */ 118 bool write(const T* data, size_t count); 119 120 /** 121 * Perform a blocking write of 'count' items into the FMQ using EventFlags. 122 * Does not support partial writes. 123 * 124 * If 'evFlag' is nullptr, it is checked whether there is an EventFlag object 125 * associated with the FMQ and it is used in that case. 126 * 127 * The application code must ensure that 'evFlag' used by the 128 * reader(s)/writer is based upon the same EventFlag word. 
129 * 130 * The method will return false without blocking if any of the following 131 * conditions are true: 132 * - If 'evFlag' is nullptr and the FMQ does not own an EventFlag object. 133 * - If the 'readNotification' bit mask is zero. 134 * - If 'count' is greater than the FMQ size. 135 * 136 * If the there is insufficient space available to write into it, the 137 * EventFlag bit mask 'readNotification' is is waited upon. 138 * 139 * This method should only be used with a MessageQueue of the flavor 140 * 'kSynchronizedReadWrite'. 141 * 142 * Upon a successful write, wake is called on 'writeNotification' (if 143 * non-zero). 144 * 145 * @param data Pointer to the array of items of type T. 146 * @param count Number of items in array. 147 * @param readNotification The EventFlag bit mask to wait on if there is not 148 * enough space in FMQ to write 'count' items. 149 * @param writeNotification The EventFlag bit mask to call wake on 150 * a successful write. No wake is called if 'writeNotification' is zero. 151 * @param timeOutNanos Number of nanoseconds after which the blocking 152 * write attempt is aborted. 153 * @param evFlag The EventFlag object to be used for blocking. If nullptr, 154 * it is checked whether the FMQ owns an EventFlag object and that is used 155 * for blocking instead. 156 * 157 * @return Whether the write was successful. 158 */ 159 bool writeBlocking(const T* data, size_t count, uint32_t readNotification, 160 uint32_t writeNotification, int64_t timeOutNanos = 0, 161 android::hardware::EventFlag* evFlag = nullptr); 162 163 bool writeBlocking(const T* data, size_t count, int64_t timeOutNanos = 0); 164 165 /** 166 * Read some data from the FMQ without blocking. 167 * 168 * @param data Pointer to the array to which read data is to be written. 169 * @param count Number of items to be read. 170 * 171 * @return Whether the read was successful. 
172 */ 173 bool read(T* data, size_t count); 174 175 /** 176 * Perform a blocking read operation of 'count' items from the FMQ. Does not 177 * perform a partial read. 178 * 179 * If 'evFlag' is nullptr, it is checked whether there is an EventFlag object 180 * associated with the FMQ and it is used in that case. 181 * 182 * The application code must ensure that 'evFlag' used by the 183 * reader(s)/writer is based upon the same EventFlag word. 184 * 185 * The method will return false without blocking if any of the following 186 * conditions are true: 187 * -If 'evFlag' is nullptr and the FMQ does not own an EventFlag object. 188 * -If the 'writeNotification' bit mask is zero. 189 * -If 'count' is greater than the FMQ size. 190 * 191 * This method should only be used with a MessageQueue of the flavor 192 * 'kSynchronizedReadWrite'. 193 194 * If FMQ does not contain 'count' items, the eventFlag bit mask 195 * 'writeNotification' is waited upon. Upon a successful read from the FMQ, 196 * wake is called on 'readNotification' (if non-zero). 197 * 198 * @param data Pointer to the array to which read data is to be written. 199 * @param count Number of items to be read. 200 * @param readNotification The EventFlag bit mask to call wake on after 201 * a successful read. No wake is called if 'readNotification' is zero. 202 * @param writeNotification The EventFlag bit mask to call a wait on 203 * if there is insufficient data in the FMQ to be read. 204 * @param timeOutNanos Number of nanoseconds after which the blocking 205 * read attempt is aborted. 206 * @param evFlag The EventFlag object to be used for blocking. 207 * 208 * @return Whether the read was successful. 
209 */ 210 bool readBlocking(T* data, size_t count, uint32_t readNotification, 211 uint32_t writeNotification, int64_t timeOutNanos = 0, 212 android::hardware::EventFlag* evFlag = nullptr); 213 214 bool readBlocking(T* data, size_t count, int64_t timeOutNanos = 0); 215 216 /** 217 * Get a pointer to the MQDescriptor object that describes this FMQ. 218 * 219 * @return Pointer to the MQDescriptor associated with the FMQ. 220 */ 221 const Descriptor* getDesc() const { return mDesc.get(); } 222 223 /** 224 * Get a pointer to the EventFlag word if there is one associated with this FMQ. 225 * 226 * @return Pointer to an EventFlag word, will return nullptr if not 227 * configured. This method does not transfer ownership. The EventFlag 228 * word will be unmapped by the MessageQueue destructor. 229 */ 230 std::atomic<uint32_t>* getEventFlagWord() const { return mEvFlagWord; } 231 232 /** 233 * Describes a memory region in the FMQ. 234 */ 235 struct MemRegion { 236 MemRegion() : MemRegion(nullptr, 0) {} 237 238 MemRegion(T* base, size_t size) : address(base), length(size) {} 239 240 MemRegion& operator=(const MemRegion &other) { 241 address = other.address; 242 length = other.length; 243 return *this; 244 } 245 246 /** 247 * Gets a pointer to the base address of the MemRegion. 248 */ 249 inline T* getAddress() const { return address; } 250 251 /** 252 * Gets the length of the MemRegion. This would equal to the number 253 * of items of type T that can be read from/written into the MemRegion. 254 */ 255 inline size_t getLength() const { return length; } 256 257 /** 258 * Gets the length of the MemRegion in bytes. 259 */ 260 inline size_t getLengthInBytes() const { return length * sizeof(T); } 261 262 private: 263 /* Base address */ 264 T* address; 265 266 /* 267 * Number of items of type T that can be written to/read from the base 268 * address. 269 */ 270 size_t length; 271 }; 272 273 /** 274 * Describes the memory regions to be used for a read or write. 
275 * The struct contains two MemRegion objects since the FMQ is a ring 276 * buffer and a read or write operation can wrap around. A single message 277 * of type T will never be broken between the two MemRegions. 278 */ 279 struct MemTransaction { 280 MemTransaction() : MemTransaction(MemRegion(), MemRegion()) {} 281 282 MemTransaction(const MemRegion& regionFirst, const MemRegion& regionSecond) : 283 first(regionFirst), second(regionSecond) {} 284 285 MemTransaction& operator=(const MemTransaction &other) { 286 first = other.first; 287 second = other.second; 288 return *this; 289 } 290 291 /** 292 * Helper method to calculate the address for a particular index for 293 * the MemTransaction object. 294 * 295 * @param idx Index of the slot to be read/written. If the 296 * MemTransaction object is representing the memory region to read/write 297 * N items of type T, the valid range of idx is between 0 and N-1. 298 * 299 * @return Pointer to the slot idx. Will be nullptr for an invalid idx. 300 */ 301 T* getSlot(size_t idx); 302 303 /** 304 * Helper method to write 'nMessages' items of type T into the memory 305 * regions described by the object starting from 'startIdx'. This method 306 * uses memcpy() and is not to meant to be used for a zero copy operation. 307 * Partial writes are not supported. 308 * 309 * @param data Pointer to the source buffer. 310 * @param nMessages Number of items of type T. 311 * @param startIdx The slot number to begin the write from. If the 312 * MemTransaction object is representing the memory region to read/write 313 * N items of type T, the valid range of startIdx is between 0 and N-1; 314 * 315 * @return Whether the write operation of size 'nMessages' succeeded. 316 */ 317 bool copyTo(const T* data, size_t startIdx, size_t nMessages = 1); 318 319 /* 320 * Helper method to read 'nMessages' items of type T from the memory 321 * regions described by the object starting from 'startIdx'. 
This method uses 322 * memcpy() and is not meant to be used for a zero copy operation. Partial reads 323 * are not supported. 324 * 325 * @param data Pointer to the destination buffer. 326 * @param nMessages Number of items of type T. 327 * @param startIdx The slot number to begin the read from. If the 328 * MemTransaction object is representing the memory region to read/write 329 * N items of type T, the valid range of startIdx is between 0 and N-1. 330 * 331 * @return Whether the read operation of size 'nMessages' succeeded. 332 */ 333 bool copyFrom(T* data, size_t startIdx, size_t nMessages = 1); 334 335 /** 336 * Returns a const reference to the first MemRegion in the 337 * MemTransaction object. 338 */ 339 inline const MemRegion& getFirstRegion() const { return first; } 340 341 /** 342 * Returns a const reference to the second MemRegion in the 343 * MemTransaction object. 344 */ 345 inline const MemRegion& getSecondRegion() const { return second; } 346 347 private: 348 /* 349 * Given a start index and the number of messages to be 350 * read/written, this helper method calculates the 351 * number of messages that should should be written to both the first 352 * and second MemRegions and the base addresses to be used for 353 * the read/write operation. 354 * 355 * Returns false if the 'startIdx' and 'nMessages' is 356 * invalid for the MemTransaction object. 357 */ 358 bool inline getMemRegionInfo(size_t idx, 359 size_t nMessages, 360 size_t& firstCount, 361 size_t& secondCount, 362 T** firstBaseAddress, 363 T** secondBaseAddress); 364 MemRegion first; 365 MemRegion second; 366 }; 367 368 /** 369 * Get a MemTransaction object to write 'nMessages' items of type T. 370 * Once the write is performed using the information from MemTransaction, 371 * the write operation is to be committed using a call to commitWrite(). 372 * 373 * @param nMessages Number of messages of type T. 
374 * @param Pointer to MemTransaction struct that describes memory to write 'nMessages' 375 * items of type T. If a write of size 'nMessages' is not possible, the base 376 * addresses in the MemTransaction object would be set to nullptr. 377 * 378 * @return Whether it is possible to write 'nMessages' items of type T 379 * into the FMQ. 380 */ 381 bool beginWrite(size_t nMessages, MemTransaction* memTx) const; 382 383 /** 384 * Commit a write of size 'nMessages'. To be only used after a call to beginWrite(). 385 * 386 * @param nMessages number of messages of type T to be written. 387 * 388 * @return Whether the write operation of size 'nMessages' succeeded. 389 */ 390 bool commitWrite(size_t nMessages); 391 392 /** 393 * Get a MemTransaction object to read 'nMessages' items of type T. 394 * Once the read is performed using the information from MemTransaction, 395 * the read operation is to be committed using a call to commitRead(). 396 * 397 * @param nMessages Number of messages of type T. 398 * @param pointer to MemTransaction struct that describes memory to read 'nMessages' 399 * items of type T. If a read of size 'nMessages' is not possible, the base 400 * pointers in the MemTransaction object returned will be set to nullptr. 401 * 402 * @return bool Whether it is possible to read 'nMessages' items of type T 403 * from the FMQ. 404 */ 405 bool beginRead(size_t nMessages, MemTransaction* memTx) const; 406 407 /** 408 * Commit a read of size 'nMessages'. To be only used after a call to beginRead(). 409 * For the unsynchronized flavor of FMQ, this method will return a failure 410 * if a write overflow happened after beginRead() was invoked. 411 * 412 * @param nMessages number of messages of type T to be read. 413 * 414 * @return bool Whether the read operation of size 'nMessages' succeeded. 
415 */ 416 bool commitRead(size_t nMessages); 417 418 private: 419 420 size_t availableToWriteBytes() const; 421 size_t availableToReadBytes() const; 422 423 MessageQueue(const MessageQueue& other) = delete; 424 MessageQueue& operator=(const MessageQueue& other) = delete; 425 MessageQueue(); 426 427 void* mapGrantorDescr(uint32_t grantorIdx); 428 void unmapGrantorDescr(void* address, uint32_t grantorIdx); 429 void initMemory(bool resetPointers); 430 431 enum DefaultEventNotification : uint32_t { 432 /* 433 * These are only used internally by the readBlocking()/writeBlocking() 434 * methods and hence once other bit combinations are not required. 435 */ 436 FMQ_NOT_FULL = 0x01, 437 FMQ_NOT_EMPTY = 0x02 438 }; 439 440 std::unique_ptr<Descriptor> mDesc; 441 uint8_t* mRing = nullptr; 442 /* 443 * TODO(b/31550092): Change to 32 bit read and write pointer counters. 444 */ 445 std::atomic<uint64_t>* mReadPtr = nullptr; 446 std::atomic<uint64_t>* mWritePtr = nullptr; 447 448 std::atomic<uint32_t>* mEvFlagWord = nullptr; 449 450 /* 451 * This EventFlag object will be owned by the FMQ and will have the same 452 * lifetime. 
453 */ 454 android::hardware::EventFlag* mEventFlag = nullptr; 455 }; 456 457 template <typename T, MQFlavor flavor> 458 T* MessageQueue<T, flavor>::MemTransaction::getSlot(size_t idx) { 459 size_t firstRegionLength = first.getLength(); 460 size_t secondRegionLength = second.getLength(); 461 462 if (idx > firstRegionLength + secondRegionLength) { 463 return nullptr; 464 } 465 466 if (idx < firstRegionLength) { 467 return first.getAddress() + idx; 468 } 469 470 return second.getAddress() + idx - firstRegionLength; 471 } 472 473 template <typename T, MQFlavor flavor> 474 bool MessageQueue<T, flavor>::MemTransaction::getMemRegionInfo(size_t startIdx, 475 size_t nMessages, 476 size_t& firstCount, 477 size_t& secondCount, 478 T** firstBaseAddress, 479 T** secondBaseAddress) { 480 size_t firstRegionLength = first.getLength(); 481 size_t secondRegionLength = second.getLength(); 482 483 if (startIdx + nMessages > firstRegionLength + secondRegionLength) { 484 /* 485 * Return false if 'nMessages' starting at 'startIdx' cannot be 486 * accomodated by the MemTransaction object. 487 */ 488 return false; 489 } 490 491 /* Number of messages to be read/written to the first MemRegion. */ 492 firstCount = startIdx < firstRegionLength ? 493 std::min(nMessages, firstRegionLength - startIdx) : 0; 494 495 /* Number of messages to be read/written to the second MemRegion. */ 496 secondCount = nMessages - firstCount; 497 498 if (firstCount != 0) { 499 *firstBaseAddress = first.getAddress() + startIdx; 500 } 501 502 if (secondCount != 0) { 503 size_t secondStartIdx = startIdx > firstRegionLength ? 
startIdx - firstRegionLength : 0; 504 *secondBaseAddress = second.getAddress() + secondStartIdx; 505 } 506 507 return true; 508 } 509 510 template <typename T, MQFlavor flavor> 511 bool MessageQueue<T, flavor>::MemTransaction::copyFrom(T* data, size_t startIdx, size_t nMessages) { 512 if (data == nullptr) { 513 return false; 514 } 515 516 size_t firstReadCount = 0, secondReadCount = 0; 517 T* firstBaseAddress = nullptr, * secondBaseAddress = nullptr; 518 519 if (getMemRegionInfo(startIdx, 520 nMessages, 521 firstReadCount, 522 secondReadCount, 523 &firstBaseAddress, 524 &secondBaseAddress) == false) { 525 /* 526 * Returns false if 'startIdx' and 'nMessages' are invalid for this 527 * MemTransaction object. 528 */ 529 return false; 530 } 531 532 if (firstReadCount != 0) { 533 memcpy(data, firstBaseAddress, firstReadCount * sizeof(T)); 534 } 535 536 if (secondReadCount != 0) { 537 memcpy(data + firstReadCount, 538 secondBaseAddress, 539 secondReadCount * sizeof(T)); 540 } 541 542 return true; 543 } 544 545 template <typename T, MQFlavor flavor> 546 bool MessageQueue<T, flavor>::MemTransaction::copyTo(const T* data, 547 size_t startIdx, 548 size_t nMessages) { 549 if (data == nullptr) { 550 return false; 551 } 552 553 size_t firstWriteCount = 0, secondWriteCount = 0; 554 T * firstBaseAddress = nullptr, * secondBaseAddress = nullptr; 555 556 if (getMemRegionInfo(startIdx, 557 nMessages, 558 firstWriteCount, 559 secondWriteCount, 560 &firstBaseAddress, 561 &secondBaseAddress) == false) { 562 /* 563 * Returns false if 'startIdx' and 'nMessages' are invalid for this 564 * MemTransaction object. 
565 */ 566 return false; 567 } 568 569 if (firstWriteCount != 0) { 570 memcpy(firstBaseAddress, data, firstWriteCount * sizeof(T)); 571 } 572 573 if (secondWriteCount != 0) { 574 memcpy(secondBaseAddress, 575 data + firstWriteCount, 576 secondWriteCount * sizeof(T)); 577 } 578 579 return true; 580 } 581 582 template <typename T, MQFlavor flavor> 583 void MessageQueue<T, flavor>::initMemory(bool resetPointers) { 584 /* 585 * Verify that the the Descriptor contains the minimum number of grantors 586 * the native_handle is valid and T matches quantum size. 587 */ 588 if ((mDesc == nullptr) || !mDesc->isHandleValid() || 589 (mDesc->countGrantors() < Descriptor::kMinGrantorCount) || 590 (mDesc->getQuantum() != sizeof(T))) { 591 return; 592 } 593 594 if (flavor == kSynchronizedReadWrite) { 595 mReadPtr = reinterpret_cast<std::atomic<uint64_t>*>( 596 mapGrantorDescr(Descriptor::READPTRPOS)); 597 } else { 598 /* 599 * The unsynchronized write flavor of the FMQ may have multiple readers 600 * and each reader would have their own read pointer counter. 601 */ 602 mReadPtr = new (std::nothrow) std::atomic<uint64_t>; 603 } 604 605 details::check(mReadPtr != nullptr); 606 607 mWritePtr = 608 reinterpret_cast<std::atomic<uint64_t>*>(mapGrantorDescr(Descriptor::WRITEPTRPOS)); 609 details::check(mWritePtr != nullptr); 610 611 if (resetPointers) { 612 mReadPtr->store(0, std::memory_order_release); 613 mWritePtr->store(0, std::memory_order_release); 614 } else if (flavor != kSynchronizedReadWrite) { 615 // Always reset the read pointer. 
616 mReadPtr->store(0, std::memory_order_release); 617 } 618 619 mRing = reinterpret_cast<uint8_t*>(mapGrantorDescr(Descriptor::DATAPTRPOS)); 620 details::check(mRing != nullptr); 621 622 mEvFlagWord = static_cast<std::atomic<uint32_t>*>(mapGrantorDescr(Descriptor::EVFLAGWORDPOS)); 623 if (mEvFlagWord != nullptr) { 624 android::hardware::EventFlag::createEventFlag(mEvFlagWord, &mEventFlag); 625 } 626 } 627 628 template <typename T, MQFlavor flavor> 629 MessageQueue<T, flavor>::MessageQueue(const Descriptor& Desc, bool resetPointers) { 630 mDesc = std::unique_ptr<Descriptor>(new (std::nothrow) Descriptor(Desc)); 631 if (mDesc == nullptr) { 632 return; 633 } 634 635 initMemory(resetPointers); 636 } 637 638 template <typename T, MQFlavor flavor> 639 MessageQueue<T, flavor>::MessageQueue(size_t numElementsInQueue, bool configureEventFlagWord) { 640 641 // Check if the buffer size would not overflow size_t 642 if (numElementsInQueue > SIZE_MAX / sizeof(T)) { 643 return; 644 } 645 /* 646 * The FMQ needs to allocate memory for the ringbuffer as well as for the 647 * read and write pointer counters. If an EventFlag word is to be configured, 648 * we also need to allocate memory for the same/ 649 */ 650 size_t kQueueSizeBytes = numElementsInQueue * sizeof(T); 651 size_t kMetaDataSize = 2 * sizeof(android::hardware::RingBufferPosition); 652 653 if (configureEventFlagWord) { 654 kMetaDataSize+= sizeof(std::atomic<uint32_t>); 655 } 656 657 /* 658 * Ashmem memory region size needs to be specified in page-aligned bytes. 659 * kQueueSizeBytes needs to be aligned to word boundary so that all offsets 660 * in the grantorDescriptor will be word aligned. 661 */ 662 size_t kAshmemSizePageAligned = 663 (Descriptor::alignToWordBoundary(kQueueSizeBytes) + kMetaDataSize + PAGE_SIZE - 1) & 664 ~(PAGE_SIZE - 1); 665 666 /* 667 * Create an ashmem region to map the memory for the ringbuffer, 668 * read counter and write counter. 
669 */ 670 int ashmemFd = ashmem_create_region("MessageQueue", kAshmemSizePageAligned); 671 ashmem_set_prot_region(ashmemFd, PROT_READ | PROT_WRITE); 672 673 /* 674 * The native handle will contain the fds to be mapped. 675 */ 676 native_handle_t* mqHandle = 677 native_handle_create(1 /* numFds */, 0 /* numInts */); 678 if (mqHandle == nullptr) { 679 return; 680 } 681 682 mqHandle->data[0] = ashmemFd; 683 mDesc = std::unique_ptr<Descriptor>(new (std::nothrow) Descriptor(kQueueSizeBytes, 684 mqHandle, 685 sizeof(T), 686 configureEventFlagWord)); 687 if (mDesc == nullptr) { 688 return; 689 } 690 initMemory(true); 691 } 692 693 template <typename T, MQFlavor flavor> 694 MessageQueue<T, flavor>::~MessageQueue() { 695 if (flavor == kUnsynchronizedWrite) { 696 delete mReadPtr; 697 } else { 698 unmapGrantorDescr(mReadPtr, Descriptor::READPTRPOS); 699 } 700 if (mWritePtr != nullptr) { 701 unmapGrantorDescr(mWritePtr, Descriptor::WRITEPTRPOS); 702 } 703 if (mRing != nullptr) { 704 unmapGrantorDescr(mRing, Descriptor::DATAPTRPOS); 705 } 706 if (mEvFlagWord != nullptr) { 707 unmapGrantorDescr(mEvFlagWord, Descriptor::EVFLAGWORDPOS); 708 android::hardware::EventFlag::deleteEventFlag(&mEventFlag); 709 } 710 } 711 712 template <typename T, MQFlavor flavor> 713 bool MessageQueue<T, flavor>::write(const T* data) { 714 return write(data, 1); 715 } 716 717 template <typename T, MQFlavor flavor> 718 bool MessageQueue<T, flavor>::read(T* data) { 719 return read(data, 1); 720 } 721 722 template <typename T, MQFlavor flavor> 723 bool MessageQueue<T, flavor>::write(const T* data, size_t nMessages) { 724 MemTransaction tx; 725 return beginWrite(nMessages, &tx) && 726 tx.copyTo(data, 0 /* startIdx */, nMessages) && 727 commitWrite(nMessages); 728 } 729 730 template <typename T, MQFlavor flavor> 731 bool MessageQueue<T, flavor>::writeBlocking(const T* data, 732 size_t count, 733 uint32_t readNotification, 734 uint32_t writeNotification, 735 int64_t timeOutNanos, 736 
android::hardware::EventFlag* evFlag) { 737 static_assert(flavor == kSynchronizedReadWrite, 738 "writeBlocking can only be used with the " 739 "kSynchronizedReadWrite flavor."); 740 /* 741 * If evFlag is null and the FMQ does not have its own EventFlag object 742 * return false; 743 * If the flavor is kSynchronizedReadWrite and the readNotification 744 * bit mask is zero return false; 745 * If the count is greater than queue size, return false 746 * to prevent blocking until timeOut. 747 */ 748 if (evFlag == nullptr) { 749 evFlag = mEventFlag; 750 if (evFlag == nullptr) { 751 details::logError( 752 "writeBlocking failed: called on MessageQueue with no Eventflag" 753 "configured or provided"); 754 return false; 755 } 756 } 757 758 if (readNotification == 0 || (count > getQuantumCount())) { 759 return false; 760 } 761 762 /* 763 * There is no need to wait for a readNotification if there is sufficient 764 * space to write is already present in the FMQ. The latter would be the case when 765 * read operations read more number of messages than write operations write. 766 * In other words, a single large read may clear the FMQ after multiple small 767 * writes. This would fail to clear a pending readNotification bit since 768 * EventFlag bits can only be cleared by a wait() call, however the bit would 769 * be correctly cleared by the next writeBlocking() call. 770 */ 771 772 bool result = write(data, count); 773 if (result) { 774 if (writeNotification) { 775 evFlag->wake(writeNotification); 776 } 777 return result; 778 } 779 780 bool shouldTimeOut = timeOutNanos != 0; 781 int64_t prevTimeNanos = shouldTimeOut ? 
android::elapsedRealtimeNano() : 0; 782 783 while (true) { 784 /* It is not required to adjust 'timeOutNanos' if 'shouldTimeOut' is false */ 785 if (shouldTimeOut) { 786 /* 787 * The current time and 'prevTimeNanos' are both CLOCK_BOOTTIME clock values(converted 788 * to Nanoseconds) 789 */ 790 int64_t currentTimeNs = android::elapsedRealtimeNano(); 791 /* 792 * Decrement 'timeOutNanos' to account for the time taken to complete the last 793 * iteration of the while loop. 794 */ 795 timeOutNanos -= currentTimeNs - prevTimeNanos; 796 prevTimeNanos = currentTimeNs; 797 798 if (timeOutNanos <= 0) { 799 /* 800 * Attempt write in case a context switch happened outside of 801 * evFlag->wait(). 802 */ 803 result = write(data, count); 804 break; 805 } 806 } 807 808 /* 809 * wait() will return immediately if there was a pending read 810 * notification. 811 */ 812 uint32_t efState = 0; 813 status_t status = evFlag->wait(readNotification, 814 &efState, 815 timeOutNanos, 816 true /* retry on spurious wake */); 817 818 if (status != android::TIMED_OUT && status != android::NO_ERROR) { 819 details::logError("Unexpected error code from EventFlag Wait status " + std::to_string(status)); 820 break; 821 } 822 823 if (status == android::TIMED_OUT) { 824 break; 825 } 826 827 /* 828 * If there is still insufficient space to write to the FMQ, 829 * keep waiting for another readNotification. 
830 */ 831 if ((efState & readNotification) && write(data, count)) { 832 result = true; 833 break; 834 } 835 } 836 837 if (result && writeNotification != 0) { 838 evFlag->wake(writeNotification); 839 } 840 841 return result; 842 } 843 844 template <typename T, MQFlavor flavor> 845 bool MessageQueue<T, flavor>::writeBlocking(const T* data, 846 size_t count, 847 int64_t timeOutNanos) { 848 return writeBlocking(data, count, FMQ_NOT_FULL, FMQ_NOT_EMPTY, timeOutNanos); 849 } 850 851 template <typename T, MQFlavor flavor> 852 bool MessageQueue<T, flavor>::readBlocking(T* data, 853 size_t count, 854 uint32_t readNotification, 855 uint32_t writeNotification, 856 int64_t timeOutNanos, 857 android::hardware::EventFlag* evFlag) { 858 static_assert(flavor == kSynchronizedReadWrite, 859 "readBlocking can only be used with the " 860 "kSynchronizedReadWrite flavor."); 861 862 /* 863 * If evFlag is null and the FMQ does not own its own EventFlag object 864 * return false; 865 * If the writeNotification bit mask is zero return false; 866 * If the count is greater than queue size, return false to prevent 867 * blocking until timeOut. 868 */ 869 if (evFlag == nullptr) { 870 evFlag = mEventFlag; 871 if (evFlag == nullptr) { 872 details::logError( 873 "readBlocking failed: called on MessageQueue with no Eventflag" 874 "configured or provided"); 875 return false; 876 } 877 } 878 879 if (writeNotification == 0 || count > getQuantumCount()) { 880 return false; 881 } 882 883 /* 884 * There is no need to wait for a write notification if sufficient 885 * data to read is already present in the FMQ. This would be the 886 * case when read operations read lesser number of messages than 887 * a write operation and multiple reads would be required to clear the queue 888 * after a single write operation. 
This check would fail to clear a pending 889 * writeNotification bit since EventFlag bits can only be cleared 890 * by a wait() call, however the bit would be correctly cleared by the next 891 * readBlocking() call. 892 */ 893 894 bool result = read(data, count); 895 if (result) { 896 if (readNotification) { 897 evFlag->wake(readNotification); 898 } 899 return result; 900 } 901 902 bool shouldTimeOut = timeOutNanos != 0; 903 int64_t prevTimeNanos = shouldTimeOut ? android::elapsedRealtimeNano() : 0; 904 905 while (true) { 906 /* It is not required to adjust 'timeOutNanos' if 'shouldTimeOut' is false */ 907 if (shouldTimeOut) { 908 /* 909 * The current time and 'prevTimeNanos' are both CLOCK_BOOTTIME clock values(converted 910 * to Nanoseconds) 911 */ 912 int64_t currentTimeNs = android::elapsedRealtimeNano(); 913 /* 914 * Decrement 'timeOutNanos' to account for the time taken to complete the last 915 * iteration of the while loop. 916 */ 917 timeOutNanos -= currentTimeNs - prevTimeNanos; 918 prevTimeNanos = currentTimeNs; 919 920 if (timeOutNanos <= 0) { 921 /* 922 * Attempt read in case a context switch happened outside of 923 * evFlag->wait(). 924 */ 925 result = read(data, count); 926 break; 927 } 928 } 929 930 /* 931 * wait() will return immediately if there was a pending write 932 * notification. 933 */ 934 uint32_t efState = 0; 935 status_t status = evFlag->wait(writeNotification, 936 &efState, 937 timeOutNanos, 938 true /* retry on spurious wake */); 939 940 if (status != android::TIMED_OUT && status != android::NO_ERROR) { 941 details::logError("Unexpected error code from EventFlag Wait status " + std::to_string(status)); 942 break; 943 } 944 945 if (status == android::TIMED_OUT) { 946 break; 947 } 948 949 /* 950 * If the data in FMQ is still insufficient, go back to waiting 951 * for another write notification. 
952 */ 953 if ((efState & writeNotification) && read(data, count)) { 954 result = true; 955 break; 956 } 957 } 958 959 if (result && readNotification != 0) { 960 evFlag->wake(readNotification); 961 } 962 return result; 963 } 964 965 template <typename T, MQFlavor flavor> 966 bool MessageQueue<T, flavor>::readBlocking(T* data, size_t count, int64_t timeOutNanos) { 967 return readBlocking(data, count, FMQ_NOT_FULL, FMQ_NOT_EMPTY, timeOutNanos); 968 } 969 970 template <typename T, MQFlavor flavor> 971 size_t MessageQueue<T, flavor>::availableToWriteBytes() const { 972 return mDesc->getSize() - availableToReadBytes(); 973 } 974 975 template <typename T, MQFlavor flavor> 976 size_t MessageQueue<T, flavor>::availableToWrite() const { 977 return availableToWriteBytes() / sizeof(T); 978 } 979 980 template <typename T, MQFlavor flavor> 981 size_t MessageQueue<T, flavor>::availableToRead() const { 982 return availableToReadBytes() / sizeof(T); 983 } 984 985 template <typename T, MQFlavor flavor> 986 bool MessageQueue<T, flavor>::beginWrite(size_t nMessages, MemTransaction* result) const { 987 /* 988 * If nMessages is greater than size of FMQ or in case of the synchronized 989 * FMQ flavor, if there is not enough space to write nMessages, then return 990 * result with null addresses. 991 */ 992 if ((flavor == kSynchronizedReadWrite && (availableToWrite() < nMessages)) || 993 nMessages > getQuantumCount()) { 994 *result = MemTransaction(); 995 return false; 996 } 997 998 auto writePtr = mWritePtr->load(std::memory_order_relaxed); 999 size_t writeOffset = writePtr % mDesc->getSize(); 1000 1001 /* 1002 * From writeOffset, the number of messages that can be written 1003 * contiguously without wrapping around the ring buffer are calculated. 1004 */ 1005 size_t contiguousMessages = (mDesc->getSize() - writeOffset) / sizeof(T); 1006 1007 if (contiguousMessages < nMessages) { 1008 /* 1009 * Wrap around is required. Both result.first and result.second are 1010 * populated. 
 */
        *result = MemTransaction(MemRegion(reinterpret_cast<T*>(mRing + writeOffset),
                                           contiguousMessages),
                                 MemRegion(reinterpret_cast<T*>(mRing),
                                           nMessages - contiguousMessages));
    } else {
        /*
         * A wrap around is not required to write nMessages. Only result.first
         * is populated.
         */
        *result = MemTransaction(MemRegion(reinterpret_cast<T*>(mRing + writeOffset), nMessages),
                                 MemRegion());
    }

    return true;
}

/*
 * Publishes a write prepared by beginWrite() by advancing the write counter.
 * The release store pairs with the acquire loads in beginRead() /
 * availableToReadBytes() so the reader observes the copied data.
 */
template <typename T, MQFlavor flavor>
/*
 * Disable integer sanitization since integer overflow here is allowed
 * and legal.
 */
__attribute__((no_sanitize("integer")))
bool MessageQueue<T, flavor>::commitWrite(size_t nMessages) {
    size_t nBytesWritten = nMessages * sizeof(T);
    auto writePtr = mWritePtr->load(std::memory_order_relaxed);
    writePtr += nBytesWritten;
    mWritePtr->store(writePtr, std::memory_order_release);
    /*
     * This method cannot fail now since we are only incrementing the writePtr
     * counter.
     */
    return true;
}

/* Bytes of unread data: write counter minus read counter (free-running). */
template <typename T, MQFlavor flavor>
size_t MessageQueue<T, flavor>::availableToReadBytes() const {
    /*
     * This method is invoked by implementations of both read() and write() and
     * hence requires a memory_order_acquire load for both mReadPtr and
     * mWritePtr.
     */
    return mWritePtr->load(std::memory_order_acquire) -
           mReadPtr->load(std::memory_order_acquire);
}

/*
 * Non-blocking bulk read: locate the data, copy it out, then advance the
 * read counter. Fails (all-or-nothing) if any step fails.
 */
template <typename T, MQFlavor flavor>
bool MessageQueue<T, flavor>::read(T* data, size_t nMessages) {
    MemTransaction tx;
    return beginRead(nMessages, &tx) &&
           tx.copyFrom(data, 0 /* startIdx */, nMessages) &&
           commitRead(nMessages);
}

/*
 * Computes the source region(s) in the ring buffer for a read of 'nMessages'
 * items without moving the read pointer; commitRead() consumes the data
 * afterwards.
 */
template <typename T, MQFlavor flavor>
/*
 * Disable integer sanitization since integer overflow here is allowed
 * and legal.
 */
__attribute__((no_sanitize("integer")))
bool MessageQueue<T, flavor>::beginRead(size_t nMessages, MemTransaction* result) const {
    *result = MemTransaction();
    /*
     * If it is detected that the data in the queue was overwritten
     * due to the reader process being too slow, the read pointer counter
     * is set to the same as the write pointer counter to indicate error
     * and the read returns false.
     * Need acquire/release memory ordering for mWritePtr.
     */
    auto writePtr = mWritePtr->load(std::memory_order_acquire);
    /*
     * A relaxed load is sufficient for mReadPtr since there will be no
     * stores to mReadPtr from a different thread.
     */
    auto readPtr = mReadPtr->load(std::memory_order_relaxed);

    /* Writer lapped the reader (unsynchronized flavor): flag the overflow. */
    if (writePtr - readPtr > mDesc->getSize()) {
        mReadPtr->store(writePtr, std::memory_order_release);
        return false;
    }

    size_t nBytesDesired = nMessages * sizeof(T);
    /*
     * Return if insufficient data to read in FMQ.
     */
    if (writePtr - readPtr < nBytesDesired) {
        return false;
    }

    size_t readOffset = readPtr % mDesc->getSize();
    /*
     * From readOffset, the number of messages that can be read contiguously
     * without wrapping around the ring buffer are calculated.
     */
    size_t contiguousMessages = (mDesc->getSize() - readOffset) / sizeof(T);

    if (contiguousMessages < nMessages) {
        /*
         * A wrap around is required. Both result.first and result.second
         * are populated.
         */
        *result = MemTransaction(MemRegion(reinterpret_cast<T*>(mRing + readOffset),
                                           contiguousMessages),
                                 MemRegion(reinterpret_cast<T*>(mRing),
                                           nMessages - contiguousMessages));
    } else {
        /*
         * A wrap around is not required. Only result.first needs to be
         * populated.
 */
        *result = MemTransaction(MemRegion(reinterpret_cast<T*>(mRing + readOffset), nMessages),
                                 MemRegion());
    }

    return true;
}

/*
 * Consumes data located by beginRead() by advancing the read counter.
 * Re-checks for writer overflow (unsynchronized flavor) before committing.
 */
template <typename T, MQFlavor flavor>
/*
 * Disable integer sanitization since integer overflow here is allowed
 * and legal.
 */
__attribute__((no_sanitize("integer")))
bool MessageQueue<T, flavor>::commitRead(size_t nMessages) {
    // TODO: Use a local copy of readPtr to avoid relaxed mReadPtr loads.
    auto readPtr = mReadPtr->load(std::memory_order_relaxed);
    auto writePtr = mWritePtr->load(std::memory_order_acquire);
    /*
     * If the flavor is unsynchronized, it is possible that a write overflow may
     * have occurred between beginRead() and commitRead().
     */
    if (writePtr - readPtr > mDesc->getSize()) {
        mReadPtr->store(writePtr, std::memory_order_release);
        return false;
    }

    size_t nBytesRead = nMessages * sizeof(T);
    readPtr += nBytesRead;
    mReadPtr->store(readPtr, std::memory_order_release);
    return true;
}

/* Size in bytes of one item of type T, as recorded in the descriptor. */
template <typename T, MQFlavor flavor>
size_t MessageQueue<T, flavor>::getQuantumSize() const {
    return mDesc->getQuantum();
}

/* Capacity of the ring buffer in whole items of type T. */
template <typename T, MQFlavor flavor>
size_t MessageQueue<T, flavor>::getQuantumCount() const {
    return mDesc->getSize() / mDesc->getQuantum();
}

/* The queue is usable only if the ring buffer and both counters mapped. */
template <typename T, MQFlavor flavor>
bool MessageQueue<T, flavor>::isValid() const {
    return mRing != nullptr && mReadPtr != nullptr && mWritePtr != nullptr;
}

/*
 * Maps the shared-memory region described by grantor 'grantorIdx' into this
 * process and returns a pointer to the grantor's offset within the mapping,
 * or nullptr on failure (bad handle, bad index, or mmap failure).
 */
template <typename T, MQFlavor flavor>
void* MessageQueue<T, flavor>::mapGrantorDescr(uint32_t grantorIdx) {
    const native_handle_t* handle = mDesc->handle();
    auto grantors = mDesc->grantors();
    if ((handle == nullptr) || (grantorIdx >= grantors.size())) {
        return nullptr;
    }

    int fdIndex = grantors[grantorIdx].fdIndex;
    /*
     * Offset for mmap must be a multiple of PAGE_SIZE.
     */
    int mapOffset = (grantors[grantorIdx].offset / PAGE_SIZE) * PAGE_SIZE;
    /*
     * NOTE(review): mapOffset/mapLength are 'int'; a grantor offset/extent
     * larger than INT_MAX would truncate here — confirm descriptor extents
     * are bounded well below that.
     */
    int mapLength =
        grantors[grantorIdx].offset - mapOffset + grantors[grantorIdx].extent;

    void* address = mmap(0, mapLength, PROT_READ | PROT_WRITE, MAP_SHARED,
                         handle->data[fdIndex], mapOffset);
    /* Adjust past the page-alignment slack so callers see the grantor start. */
    return (address == MAP_FAILED)
            ? nullptr
            : reinterpret_cast<uint8_t*>(address) +
                    (grantors[grantorIdx].offset - mapOffset);
}

/*
 * Reverses mapGrantorDescr(): recomputes the page-aligned base address from
 * the grantor-start pointer previously returned, then unmaps the region.
 * No-op for nullptr or an out-of-range grantor index.
 */
template <typename T, MQFlavor flavor>
void MessageQueue<T, flavor>::unmapGrantorDescr(void* address,
                                                uint32_t grantorIdx) {
    auto grantors = mDesc->grantors();
    if ((address == nullptr) || (grantorIdx >= grantors.size())) {
        return;
    }

    /* Must mirror the offset/length arithmetic used in mapGrantorDescr(). */
    int mapOffset = (grantors[grantorIdx].offset / PAGE_SIZE) * PAGE_SIZE;
    int mapLength =
        grantors[grantorIdx].offset - mapOffset + grantors[grantorIdx].extent;
    void* baseAddress = reinterpret_cast<uint8_t*>(address) -
                        (grantors[grantorIdx].offset - mapOffset);
    if (baseAddress) munmap(baseAddress, mapLength);
}

}  // namespace hardware
}  // namespace android
#endif  // HIDL_MQ_H