// MemoryDealer.cpp — best-fit sub-allocator that carves offsets out of a shared HIDL memory heap.
      1 /*
      2  * Copyright (C) 2007 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #define LOG_TAG "MemoryDealer"
     18 
#include <hidlcache/MemoryDealer.h>
#include <hidlmemory/HidlMemoryToken.h>
#include <hidlmemory/mapping.h>

#include <list>
#include <mutex>

#include <log/log.h>

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <sys/file.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
     39 
     40 using std::string;
     41 
     42 namespace android {
     43 namespace hardware {
     44 
// A simple best-fit free-list allocator over a fixed-size heap.
// It never touches memory itself: it only hands out byte offsets, which the
// caller resolves against a real mapping. Internally, chunk starts and sizes
// are bookkept in units of kMemoryAlign bytes (not bytes). All public entry
// points are serialized by mLock.
class SimpleBestFitAllocator {
    enum { PAGE_ALIGNED = 0x00000001 };

   public:
    explicit SimpleBestFitAllocator(size_t size);
    ~SimpleBestFitAllocator();

    // Returns the byte offset of the new allocation. On failure the negative
    // NO_MEMORY status from alloc() is returned through the size_t conversion.
    size_t allocate(size_t size, uint32_t flags = 0);
    // NO_ERROR if `offset` matched a live allocation, NAME_NOT_FOUND otherwise.
    status_t deallocate(size_t offset);
    // Total heap size in bytes (page-rounded at construction).
    size_t size() const;
    // Log / append a human-readable summary of the heap state.
    void dump(const char* tag) const;
    void dump(string& res, const char* tag) const;

    static size_t getAllocationAlignment() { return kMemoryAlign; }

   private:
    // One node of the chunk list. `start` and `size` are in kMemoryAlign
    // units; the 28-bit field caps a single chunk's size. `free` is 0 or 1.
    struct chunk_t {
        chunk_t(size_t start, size_t size) : start(start), size(size), free(1) {}
        size_t start;
        size_t size : 28;
        int free : 4;
    };
    using List = std::list<chunk_t*>;
    using Iterator = std::list<chunk_t*>::iterator;
    using IteratorConst = std::list<chunk_t*>::const_iterator;
    using Mutex = std::mutex;
    using Lock = std::lock_guard<Mutex>;

    // Unlocked implementations; callers must hold mLock.
    ssize_t alloc(size_t size, uint32_t flags);
    chunk_t* dealloc(size_t start);
    void dump_l(const char* tag) const;
    void dump_l(string& res, const char* tag) const;

    static const int kMemoryAlign;
    mutable Mutex mLock;
    List mList;       // chunks ordered by start offset
    size_t mHeapSize; // bytes
};
     83 
     84 MemoryDealer::MemoryDealer(size_t size) : mAllocator(new SimpleBestFitAllocator(size)) {}
     85 
     86 MemoryDealer::~MemoryDealer() {
     87     delete mAllocator;
     88 }
     89 
     90 ssize_t MemoryDealer::allocateOffset(size_t size) {
     91     return mAllocator->allocate(size);
     92 }
     93 
     94 void MemoryDealer::deallocate(size_t offset) {
     95     mAllocator->deallocate(offset);
     96 }
     97 
     98 void MemoryDealer::dump(const char* tag) const {
     99     mAllocator->dump(tag);
    100 }
    101 
    102 size_t MemoryDealer::getAllocationAlignment() {
    103     return SimpleBestFitAllocator::getAllocationAlignment();
    104 }
    105 
// Allocation granularity: every block start and size is a multiple of 32
// bytes so blocks land on cache-line boundaries.
const int SimpleBestFitAllocator::kMemoryAlign = 32;
    108 
    109 SimpleBestFitAllocator::SimpleBestFitAllocator(size_t size) {
    110     size_t pagesize = getpagesize();
    111     mHeapSize = ((size + pagesize - 1) & ~(pagesize - 1));
    112 
    113     chunk_t* node = new chunk_t(0, mHeapSize / kMemoryAlign);
    114     mList.push_front(node);
    115 }
    116 
    117 SimpleBestFitAllocator::~SimpleBestFitAllocator() {
    118     while (mList.size() != 0) {
    119         chunk_t* removed = mList.front();
    120         mList.pop_front();
    121 #ifdef __clang_analyzer__
    122         // Clang static analyzer gets confused in this loop
    123         // and generates a false positive warning about accessing
    124         // memory that is already freed.
    125         // Add an "assert" to avoid the confusion.
    126         LOG_ALWAYS_FATAL_IF(mList.head() == removed);
    127 #endif
    128         delete removed;
    129     }
    130 }
    131 
    132 size_t SimpleBestFitAllocator::size() const {
    133     return mHeapSize;
    134 }
    135 
    136 size_t SimpleBestFitAllocator::allocate(size_t size, uint32_t flags) {
    137     Lock lock(mLock);
    138     ssize_t offset = alloc(size, flags);
    139     return offset;
    140 }
    141 
    142 status_t SimpleBestFitAllocator::deallocate(size_t offset) {
    143     Lock lock(mLock);
    144     chunk_t const* const freed = dealloc(offset);
    145     if (freed) {
    146         return NO_ERROR;
    147     }
    148     return NAME_NOT_FOUND;
    149 }
    150 
// Best-fit search over the chunk list. `size` arrives in bytes and is
// converted to kMemoryAlign units; returns the byte offset of the carved-out
// block, or NO_MEMORY when no free chunk is large enough. Caller holds mLock.
ssize_t SimpleBestFitAllocator::alloc(size_t size, uint32_t flags) {
    if (size == 0) {
        return 0;
    }
    // Round up to whole kMemoryAlign units; all chunk bookkeeping below is in
    // those units, not bytes.
    size = (size + kMemoryAlign - 1) / kMemoryAlign;
    size_t pagesize = getpagesize();

    Iterator free_chunk_p = mList.end();
    for (Iterator p = mList.begin(); p != mList.end(); p++) {
        chunk_t* cur = *p;
        // For PAGE_ALIGNED requests, `extra` is the padding (in units) needed
        // to push this chunk's start up to the next page boundary.
        int extra = 0;
        if (flags & PAGE_ALIGNED) extra = (-cur->start & ((pagesize / kMemoryAlign) - 1));

        // best fit
        if (cur->free && (cur->size >= (size + extra))) {
            // Remember the smallest free chunk that fits so far.
            if ((free_chunk_p == mList.end()) || (cur->size < (*free_chunk_p)->size)) {
                free_chunk_p = p;
            }
            // Exact fit: stop searching. NOTE(review): this test ignores
            // `extra`, so a padded page-aligned candidate is never treated as
            // exact — confirm that is intended.
            if (cur->size == size) {
                break;
            }
        }
    }
    if (free_chunk_p != mList.end()) {
        chunk_t* free_chunk = *free_chunk_p;
        const size_t free_size = free_chunk->size;
        // Claim the winning chunk and shrink it to the request; leftovers are
        // split off below.
        free_chunk->free = 0;
        free_chunk->size = size;
        if (free_size > size) {
            int extra = 0;
            if (flags & PAGE_ALIGNED)
                extra = (-free_chunk->start & ((pagesize / kMemoryAlign) - 1));
            if (extra) {
                // Split the alignment padding off as its own (still-free)
                // chunk placed immediately before the allocation.
                chunk_t* split = new chunk_t(free_chunk->start, extra);
                free_chunk->start += extra;
                mList.insert(free_chunk_p, split);
            }

            ALOGE_IF(
                (flags & PAGE_ALIGNED) && ((free_chunk->start * kMemoryAlign) & (pagesize - 1)),
                "PAGE_ALIGNED requested, but page is not aligned!!!");

            // Anything left beyond the allocation (and its padding) becomes a
            // new free chunk inserted right after the allocation.
            const ssize_t tail_free = free_size - (size + extra);
            if (tail_free > 0) {
                chunk_t* split = new chunk_t(free_chunk->start + free_chunk->size, tail_free);
                mList.insert(++free_chunk_p, split);
            }
        }
        // Convert back from kMemoryAlign units to a byte offset.
        return (free_chunk->start) * kMemoryAlign;
    }
    return NO_MEMORY;
}
    203 
    204 SimpleBestFitAllocator::chunk_t* SimpleBestFitAllocator::dealloc(size_t start) {
    205     start = start / kMemoryAlign;
    206 
    207     for (Iterator pos = mList.begin(); pos != mList.end(); pos++) {
    208         chunk_t* cur = *pos;
    209         if (cur->start == start) {
    210             LOG_FATAL_IF(cur->free, "block at offset 0x%08lX of size 0x%08lX already freed",
    211                          cur->start * kMemoryAlign, cur->size * kMemoryAlign);
    212 
    213             // merge freed blocks together
    214             chunk_t* freed = cur;
    215             cur->free = 1;
    216             do {
    217                 if (pos != mList.begin()) {
    218                     pos--;
    219                     chunk_t* const p = *pos;
    220                     pos++;
    221                     if (p->free || !cur->size) {
    222                         freed = p;
    223                         p->size += cur->size;
    224                         mList.erase(pos);
    225                         delete cur;
    226                     }
    227                 }
    228                 if (++pos == mList.end()) break;
    229                 cur = *pos;
    230             } while (cur && cur->free);
    231 
    232 #ifndef NDEBUG
    233             if (!freed->free) {
    234                 dump_l("dealloc (!freed->free)");
    235             }
    236 #endif
    237             LOG_FATAL_IF(!freed->free, "freed block at offset 0x%08lX of size 0x%08lX is not free!",
    238                          freed->start * kMemoryAlign, freed->size * kMemoryAlign);
    239 
    240             return freed;
    241         }
    242     }
    243     return 0;
    244 }
    245 
    246 void SimpleBestFitAllocator::dump(const char* tag) const {
    247     Lock lock(mLock);
    248     dump_l(tag);
    249 }
    250 
    251 void SimpleBestFitAllocator::dump_l(const char* tag) const {
    252     string result;
    253     dump_l(result, tag);
    254     ALOGD("%s", result.c_str());
    255 }
    256 
    257 void SimpleBestFitAllocator::dump(string& result, const char* tag) const {
    258     Lock lock(mLock);
    259     dump_l(result, tag);
    260 }
    261 
    262 void SimpleBestFitAllocator::dump_l(string& result, const char* tag) const {
    263     size_t size = 0;
    264     int32_t i = 0;
    265     const size_t SIZE = 256;
    266     char buffer[SIZE];
    267     snprintf(buffer, SIZE, "  %s (%p, size=%u)\n", tag, this, (unsigned int)mHeapSize);
    268 
    269     result.append(buffer);
    270 
    271     for (IteratorConst pos = mList.begin(); pos != mList.end(); pos++) {
    272         chunk_t const* cur = *pos;
    273 
    274         if (!cur->free) size += cur->size * kMemoryAlign;
    275 
    276         i++;
    277     }
    278     snprintf(buffer, SIZE, "  size allocated: %u (%u KB)\n", int(size), int(size / 1024));
    279     result.append(buffer);
    280 }
    281 
    282 bool HidlMemoryDealer::isOk(const MemoryBlock& memblk) {
    283     return memblk.token != nullptr;
    284 }
    285 
    286 sp<::android::hidl::memory::V1_0::IMemory> HidlMemoryDealer::heap() {
    287     return mHeap;
    288 }
    289 
// The required heap size alignment is 4096 bytes (one page); getInstance()
// rejects heaps that are not a non-zero multiple of this.
static const uint64_t kHeapSizeAlignment = (0x1ULL << 12);
    292 
    293 sp<HidlMemoryDealer> HidlMemoryDealer::getInstance(const hidl_memory& mem) {
    294     uint64_t msk = (kHeapSizeAlignment - 1);
    295     if (mem.size() & msk || !(mem.size() & ~msk)) {
    296         ALOGE("size is not aligned to %x", static_cast<uint32_t>(kHeapSizeAlignment));
    297         return nullptr;
    298     }
    299     sp<IMemory> heap = mapMemory(mem);
    300     if (heap == nullptr) {
    301         ALOGE("fail to mapMemory");
    302         return nullptr;
    303     }
    304     return new HidlMemoryDealer(heap, mem);
    305 }
    306 
// The base MemoryDealer manages offsets over the mapped heap's full size;
// mToken is the sharable token other processes use to refer to the backing
// hidl_memory.
HidlMemoryDealer::HidlMemoryDealer(sp<IMemory> heap, const hidl_memory& mem)
    : MemoryDealer(heap->getSize()),
      mHeap(heap),
      mToken(new HidlMemoryToken(HidlMemory::getInstance(mem))) {}
    311 
    312 ::android::hidl::memory::block::V1_0::MemoryBlock HidlMemoryDealer::allocate(size_t size) {
    313     MemoryBlock memblk = {nullptr, 0xFFFFFFFFULL, 0xFFFFFFFFULL};
    314     ssize_t offset = allocateOffset(size);
    315     if (offset >= 0) {
    316         memblk.token = mToken;
    317         memblk.size = size;
    318         memblk.offset = offset;
    319     }
    320     return memblk;
    321 }
    322 
    323 };  // namespace hardware
    324 };  // namespace android
    325