// frameworks/native — binder/IMemory.cpp
      1 /*
      2  * Copyright (C) 2008 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #define LOG_TAG "IMemory"
     18 
     19 #include <atomic>
     20 #include <stdatomic.h>
     21 
     22 #include <fcntl.h>
     23 #include <stdint.h>
     24 #include <stdio.h>
     25 #include <stdlib.h>
     26 #include <sys/types.h>
     27 #include <sys/mman.h>
     28 #include <unistd.h>
     29 
     30 #include <binder/IMemory.h>
     31 #include <binder/Parcel.h>
     32 #include <log/log.h>
     33 
     34 #include <utils/KeyedVector.h>
     35 #include <utils/threads.h>
     36 
     37 #define VERBOSE   0
     38 
     39 namespace android {
     40 // ---------------------------------------------------------------------------
     41 
// Process-wide cache mapping a remote IMemoryHeap's binder to a single local
// proxy, so that a heap shared by many IMemory objects is tracked (and, via
// BpMemoryHeap, mapped) only once per process. Implemented as a
// DeathRecipient so entries can be purged when the remote process dies.
class HeapCache : public IBinder::DeathRecipient
{
public:
    HeapCache();
    virtual ~HeapCache();

    // Purges the cache entry for a heap whose hosting process died.
    virtual void binderDied(const wp<IBinder>& who);

    // Returns the cached heap for |binder|, creating a new entry (count = 1)
    // if none exists; otherwise increments the entry's reference count.
    sp<IMemoryHeap> find_heap(const sp<IBinder>& binder);
    // Drops one reference on the cache entry; removes the entry when the
    // count reaches zero.
    void free_heap(const sp<IBinder>& binder);
    // Looks up without touching the reference count; returns a transient,
    // uncached proxy if |binder| is not in the cache.
    sp<IMemoryHeap> get_heap(const sp<IBinder>& binder);
    // Logs every cached entry (debugging aid).
    void dump_heaps();

private:
    // For IMemory.cpp
    struct heap_info_t {
        sp<IMemoryHeap> heap;
        int32_t         count;  // outstanding find_heap() references
        // Note that this cannot be meaningfully copied.
    };

    void free_heap(const wp<IBinder>& binder);

    Mutex mHeapCacheLock;  // Protects entire vector below.
    KeyedVector< wp<IBinder>, heap_info_t > mHeapCache;
    // We do not use the copy-on-write capabilities of KeyedVector.
    // TODO: Reimplemement based on standard C++ container?
};
     70 
// Single process-wide cache instance, created at static-initialization time.
static sp<HeapCache> gHeapCache = new HeapCache();
     72 
     73 /******************************************************************************/
     74 
// Binder transaction codes for IMemoryHeap.
enum {
    HEAP_ID = IBinder::FIRST_CALL_TRANSACTION
};
     78 
// Client-side proxy for a remote IMemoryHeap. The heap's file descriptor is
// fetched over binder and mmap()ed lazily, on first access, by
// assertMapped()/assertReallyMapped().
class BpMemoryHeap : public BpInterface<IMemoryHeap>
{
public:
    explicit BpMemoryHeap(const sp<IBinder>& impl);
    virtual ~BpMemoryHeap();

    virtual int getHeapID() const;
    virtual void* getBase() const;
    virtual size_t getSize() const;
    virtual uint32_t getFlags() const;
    off_t getOffset() const override;

private:
    friend class IMemory;
    friend class HeapCache;

    // for debugging in this module
    static inline sp<IMemoryHeap> find_heap(const sp<IBinder>& binder) {
        return gHeapCache->find_heap(binder);
    }
    static inline void free_heap(const sp<IBinder>& binder) {
        gHeapCache->free_heap(binder);
    }
    static inline sp<IMemoryHeap> get_heap(const sp<IBinder>& binder) {
        return gHeapCache->get_heap(binder);
    }
    static inline void dump_heaps() {
        gHeapCache->dump_heaps();
    }

    // Ensures this proxy has a local mapping, copying the fields from the
    // per-binder heap cached in gHeapCache and duping its fd.
    void assertMapped() const;
    // Performs the HEAP_ID transaction and the actual mmap(); invoked on the
    // heap object returned by find_heap().
    void assertReallyMapped() const;

    // mHeapId doubles as the "mapped" flag: -1 until a local fd exists.
    // Release stores on it publish the mutable fields below; acquire loads
    // make them visible to readers.
    mutable std::atomic<int32_t> mHeapId;
    mutable void*       mBase;      // local mapping, MAP_FAILED when unmapped
    mutable size_t      mSize;
    mutable uint32_t    mFlags;
    mutable off_t       mOffset;
    mutable bool        mRealHeap;  // true if this object owns the mmap()
    mutable Mutex       mLock;      // serializes the lazy-mapping critical section
};
    120 
    121 // ----------------------------------------------------------------------------
    122 
// Binder transaction codes for IMemory.
enum {
    GET_MEMORY = IBinder::FIRST_CALL_TRANSACTION
};
    126 
// Client-side proxy for a remote IMemory; caches the (heap, offset, size)
// triple after the first successful GET_MEMORY transaction.
class BpMemory : public BpInterface<IMemory>
{
public:
    explicit BpMemory(const sp<IBinder>& impl);
    virtual ~BpMemory();
    // NOLINTNEXTLINE(google-default-arguments)
    virtual sp<IMemoryHeap> getMemory(ssize_t* offset=nullptr, size_t* size=nullptr) const;

private:
    // Filled in lazily by getMemory(); mutable because getMemory() is const.
    mutable sp<IMemoryHeap> mHeap;
    mutable ssize_t mOffset;
    mutable size_t mSize;
};
    140 
    141 /******************************************************************************/
    142 
    143 void* IMemory::fastPointer(const sp<IBinder>& binder, ssize_t offset) const
    144 {
    145     sp<IMemoryHeap> realHeap = BpMemoryHeap::get_heap(binder);
    146     void* const base = realHeap->base();
    147     if (base == MAP_FAILED)
    148         return nullptr;
    149     return static_cast<char*>(base) + offset;
    150 }
    151 
    152 void* IMemory::pointer() const {
    153     ssize_t offset;
    154     sp<IMemoryHeap> heap = getMemory(&offset);
    155     void* const base = heap!=nullptr ? heap->base() : MAP_FAILED;
    156     if (base == MAP_FAILED)
    157         return nullptr;
    158     return static_cast<char*>(base) + offset;
    159 }
    160 
    161 size_t IMemory::size() const {
    162     size_t size;
    163     getMemory(nullptr, &size);
    164     return size;
    165 }
    166 
    167 ssize_t IMemory::offset() const {
    168     ssize_t offset;
    169     getMemory(&offset);
    170     return offset;
    171 }
    172 
    173 /******************************************************************************/
    174 
// The heap is resolved lazily by the first getMemory() call; until then the
// cached offset and size are zero.
BpMemory::BpMemory(const sp<IBinder>& impl)
    : BpInterface<IMemory>(impl), mOffset(0), mSize(0)
{
}

BpMemory::~BpMemory()
{
}
    183 
// Fetches (once) the heap/offset/size triple from the remote side, caching
// it in mHeap/mOffset/mSize. Out-parameters are optional. Returns the heap,
// or nullptr when the cached size is zero (never fetched, or rejected).
// NOLINTNEXTLINE(google-default-arguments)
sp<IMemoryHeap> BpMemory::getMemory(ssize_t* offset, size_t* size) const
{
    if (mHeap == nullptr) {
        Parcel data, reply;
        data.writeInterfaceToken(IMemory::getInterfaceDescriptor());
        if (remote()->transact(GET_MEMORY, data, &reply) == NO_ERROR) {
            sp<IBinder> heap = reply.readStrongBinder();
            if (heap != nullptr) {
                mHeap = interface_cast<IMemoryHeap>(heap);
                if (mHeap != nullptr) {
                    const int64_t offset64 = reply.readInt64();
                    const uint64_t size64 = reply.readUint64();
                    const ssize_t o = (ssize_t)offset64;
                    const size_t s = (size_t)size64;
                    size_t heapSize = mHeap->getSize();
                    // Reject any (offset, size) pair that was truncated by
                    // the 64-bit -> native narrowing above, or that does not
                    // fit inside the heap; a hostile server could otherwise
                    // make pointer() hand out out-of-bounds addresses.
                    if (s == size64 && o == offset64 // ILP32 bounds check
                            && s <= heapSize
                            && o >= 0
                            && (static_cast<size_t>(o) <= heapSize - s)) {
                        mOffset = o;
                        mSize = s;
                    } else {
                        // Hm.
                        // Record the rejected transaction (SafetyNet log,
                        // bug id 26877992) and fall back to an empty region.
                        android_errorWriteWithInfoLog(0x534e4554,
                            "26877992", -1, nullptr, 0);
                        mOffset = 0;
                        mSize = 0;
                    }
                }
            }
        }
    }
    if (offset) *offset = mOffset;
    if (size) *size = mSize;
    return (mSize > 0) ? mHeap : nullptr;
}
    221 
    222 // ---------------------------------------------------------------------------
    223 
// Generates the IMemory descriptor and asInterface() boilerplate.
IMPLEMENT_META_INTERFACE(Memory, "android.utils.IMemory");
    225 
// BnMemory has no state of its own; the special members are intentionally empty.
BnMemory::BnMemory() {
}

BnMemory::~BnMemory() {
}
    231 
    232 // NOLINTNEXTLINE(google-default-arguments)
    233 status_t BnMemory::onTransact(
    234     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
    235 {
    236     switch(code) {
    237         case GET_MEMORY: {
    238             CHECK_INTERFACE(IMemory, data, reply);
    239             ssize_t offset;
    240             size_t size;
    241             reply->writeStrongBinder( IInterface::asBinder(getMemory(&offset, &size)) );
    242             reply->writeInt64(offset);
    243             reply->writeUint64(size);
    244             return NO_ERROR;
    245         } break;
    246         default:
    247             return BBinder::onTransact(code, data, reply, flags);
    248     }
    249 }
    250 
    251 
    252 /******************************************************************************/
    253 
// Starts fully unmapped: mHeapId == -1 and mBase == MAP_FAILED are the
// sentinels the lazy-mapping code checks for.
BpMemoryHeap::BpMemoryHeap(const sp<IBinder>& impl)
    : BpInterface<IMemoryHeap>(impl),
        mHeapId(-1), mBase(MAP_FAILED), mSize(0), mFlags(0), mOffset(0), mRealHeap(false)
{
}
    259 
// Tear-down mirrors the two ways a BpMemoryHeap gets mapped: the "real"
// heap (the one cached in gHeapCache, which performed the mmap) unmaps the
// region itself; every other mapped proxy merely drops its reference on the
// cache entry. An fd of -1 means we never mapped, so nothing to do.
BpMemoryHeap::~BpMemoryHeap() {
    int32_t heapId = mHeapId.load(memory_order_relaxed);
    if (heapId != -1) {
        close(heapId);
        if (mRealHeap) {
            // by construction we're the last one
            if (mBase != MAP_FAILED) {
                sp<IBinder> binder = IInterface::asBinder(this);

                if (VERBOSE) {
                    ALOGD("UNMAPPING binder=%p, heap=%p, size=%zu, fd=%d",
                            binder.get(), this, mSize, heapId);
                }

                munmap(mBase, mSize);
            }
        } else {
            // remove from list only if it was mapped before
            sp<IBinder> binder = IInterface::asBinder(this);
            free_heap(binder);
        }
    }
}
    283 
// Lazily maps this proxy using double-checked locking on mHeapId: a cheap
// acquire load first, then the real check under mLock. The actual mmap is
// done (at most once per process) by the cache's "real" heap; this proxy
// then copies its fields and dups its fd.
void BpMemoryHeap::assertMapped() const
{
    int32_t heapId = mHeapId.load(memory_order_acquire);
    if (heapId == -1) {
        sp<IBinder> binder(IInterface::asBinder(const_cast<BpMemoryHeap*>(this)));
        sp<BpMemoryHeap> heap(static_cast<BpMemoryHeap*>(find_heap(binder).get()));
        heap->assertReallyMapped();
        if (heap->mBase != MAP_FAILED) {
            Mutex::Autolock _l(mLock);
            // Re-check under the lock: another thread may have won the race.
            if (mHeapId.load(memory_order_relaxed) == -1) {
                mBase   = heap->mBase;
                mSize   = heap->mSize;
                mOffset = heap->mOffset;
                // Own a private fd so our lifetime is independent of the
                // cached heap's.
                int fd = fcntl(heap->mHeapId.load(memory_order_relaxed), F_DUPFD_CLOEXEC, 0);
                ALOGE_IF(fd==-1, "cannot dup fd=%d",
                        heap->mHeapId.load(memory_order_relaxed));
                // Release store publishes mBase/mSize/mOffset to acquire loads.
                mHeapId.store(fd, memory_order_release);
            }
        } else {
            // something went wrong
            free_heap(binder);
        }
    }
}
    308 
    309 void BpMemoryHeap::assertReallyMapped() const
    310 {
    311     int32_t heapId = mHeapId.load(memory_order_acquire);
    312     if (heapId == -1) {
    313 
    314         // remote call without mLock held, worse case scenario, we end up
    315         // calling transact() from multiple threads, but that's not a problem,
    316         // only mmap below must be in the critical section.
    317 
    318         Parcel data, reply;
    319         data.writeInterfaceToken(IMemoryHeap::getInterfaceDescriptor());
    320         status_t err = remote()->transact(HEAP_ID, data, &reply);
    321         int parcel_fd = reply.readFileDescriptor();
    322         const uint64_t size64 = reply.readUint64();
    323         const int64_t offset64 = reply.readInt64();
    324         const uint32_t flags = reply.readUint32();
    325         const size_t size = (size_t)size64;
    326         const off_t offset = (off_t)offset64;
    327         if (err != NO_ERROR || // failed transaction
    328                 size != size64 || offset != offset64) { // ILP32 size check
    329             ALOGE("binder=%p transaction failed fd=%d, size=%zu, err=%d (%s)",
    330                     IInterface::asBinder(this).get(),
    331                     parcel_fd, size, err, strerror(-err));
    332             return;
    333         }
    334 
    335         Mutex::Autolock _l(mLock);
    336         if (mHeapId.load(memory_order_relaxed) == -1) {
    337             int fd = fcntl(parcel_fd, F_DUPFD_CLOEXEC, 0);
    338             ALOGE_IF(fd == -1, "cannot dup fd=%d, size=%zu, err=%d (%s)",
    339                     parcel_fd, size, err, strerror(errno));
    340 
    341             int access = PROT_READ;
    342             if (!(flags & READ_ONLY)) {
    343                 access |= PROT_WRITE;
    344             }
    345             mRealHeap = true;
    346             mBase = mmap(nullptr, size, access, MAP_SHARED, fd, offset);
    347             if (mBase == MAP_FAILED) {
    348                 ALOGE("cannot map BpMemoryHeap (binder=%p), size=%zu, fd=%d (%s)",
    349                         IInterface::asBinder(this).get(), size, fd, strerror(errno));
    350                 close(fd);
    351             } else {
    352                 mSize = size;
    353                 mFlags = flags;
    354                 mOffset = offset;
    355                 mHeapId.store(fd, memory_order_release);
    356             }
    357         }
    358     }
    359 }
    360 
// Accessors: each one lazily maps the heap first; the fields they read were
// published by the release store on mHeapId in assertMapped() /
// assertReallyMapped().
int BpMemoryHeap::getHeapID() const {
    assertMapped();
    // We either stored mHeapId ourselves, or loaded it with acquire semantics.
    return mHeapId.load(memory_order_relaxed);
}

void* BpMemoryHeap::getBase() const {
    assertMapped();
    return mBase;
}

size_t BpMemoryHeap::getSize() const {
    assertMapped();
    return mSize;
}

uint32_t BpMemoryHeap::getFlags() const {
    assertMapped();
    return mFlags;
}

off_t BpMemoryHeap::getOffset() const {
    assertMapped();
    return mOffset;
}
    386 
    387 // ---------------------------------------------------------------------------
    388 
// Generates the IMemoryHeap descriptor and asInterface() boilerplate.
IMPLEMENT_META_INTERFACE(MemoryHeap, "android.utils.IMemoryHeap");
    390 
// BnMemoryHeap has no state of its own; the special members are intentionally empty.
BnMemoryHeap::BnMemoryHeap() {
}

BnMemoryHeap::~BnMemoryHeap() {
}
    396 
    397 // NOLINTNEXTLINE(google-default-arguments)
    398 status_t BnMemoryHeap::onTransact(
    399         uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
    400 {
    401     switch(code) {
    402        case HEAP_ID: {
    403             CHECK_INTERFACE(IMemoryHeap, data, reply);
    404             reply->writeFileDescriptor(getHeapID());
    405             reply->writeUint64(getSize());
    406             reply->writeInt64(getOffset());
    407             reply->writeUint32(getFlags());
    408             return NO_ERROR;
    409         } break;
    410         default:
    411             return BBinder::onTransact(code, data, reply, flags);
    412     }
    413 }
    414 
    415 /*****************************************************************************/
    416 
// The cache starts empty; entries are added on demand by find_heap().
HeapCache::HeapCache()
    : DeathRecipient()
{
}

HeapCache::~HeapCache()
{
}
    425 
// The remote heap's process died: drop the cache's reference so the entry
// can be reclaimed.
void HeapCache::binderDied(const wp<IBinder>& binder)
{
    //ALOGD("binderDied binder=%p", binder.unsafe_get());
    free_heap(binder);
}
    431 
    432 sp<IMemoryHeap> HeapCache::find_heap(const sp<IBinder>& binder)
    433 {
    434     Mutex::Autolock _l(mHeapCacheLock);
    435     ssize_t i = mHeapCache.indexOfKey(binder);
    436     if (i>=0) {
    437         heap_info_t& info = mHeapCache.editValueAt(i);
    438         ALOGD_IF(VERBOSE,
    439                 "found binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
    440                 binder.get(), info.heap.get(),
    441                 static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
    442                 static_cast<BpMemoryHeap*>(info.heap.get())
    443                     ->mHeapId.load(memory_order_relaxed),
    444                 info.count);
    445         ++info.count;
    446         return info.heap;
    447     } else {
    448         heap_info_t info;
    449         info.heap = interface_cast<IMemoryHeap>(binder);
    450         info.count = 1;
    451         //ALOGD("adding binder=%p, heap=%p, count=%d",
    452         //      binder.get(), info.heap.get(), info.count);
    453         mHeapCache.add(binder, info);
    454         return info.heap;
    455     }
    456 }
    457 
// Convenience overload: forwards to the wp<> version used by binderDied().
void HeapCache::free_heap(const sp<IBinder>& binder)  {
    free_heap( wp<IBinder>(binder) );
}
    461 
// Drops one reference on |binder|'s cache entry and removes the entry when
// the count hits zero. |rel| keeps the heap alive until after the lock is
// released, so the heap's destructor (which may re-enter this cache) never
// runs while mHeapCacheLock is held.
void HeapCache::free_heap(const wp<IBinder>& binder)
{
    sp<IMemoryHeap> rel;
    {
        Mutex::Autolock _l(mHeapCacheLock);
        ssize_t i = mHeapCache.indexOfKey(binder);
        if (i>=0) {
            heap_info_t& info(mHeapCache.editValueAt(i));
            if (--info.count == 0) {
                ALOGD_IF(VERBOSE,
                        "removing binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
                        binder.unsafe_get(), info.heap.get(),
                        static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
                        static_cast<BpMemoryHeap*>(info.heap.get())
                            ->mHeapId.load(memory_order_relaxed),
                        info.count);
                rel = mHeapCache.valueAt(i).heap;
                mHeapCache.removeItemsAt(i);
            }
        } else {
            ALOGE("free_heap binder=%p not found!!!", binder.unsafe_get());
        }
    }
}
    486 
    487 sp<IMemoryHeap> HeapCache::get_heap(const sp<IBinder>& binder)
    488 {
    489     sp<IMemoryHeap> realHeap;
    490     Mutex::Autolock _l(mHeapCacheLock);
    491     ssize_t i = mHeapCache.indexOfKey(binder);
    492     if (i>=0)   realHeap = mHeapCache.valueAt(i).heap;
    493     else        realHeap = interface_cast<IMemoryHeap>(binder);
    494     return realHeap;
    495 }
    496 
    497 void HeapCache::dump_heaps()
    498 {
    499     Mutex::Autolock _l(mHeapCacheLock);
    500     int c = mHeapCache.size();
    501     for (int i=0 ; i<c ; i++) {
    502         const heap_info_t& info = mHeapCache.valueAt(i);
    503         BpMemoryHeap const* h(static_cast<BpMemoryHeap const *>(info.heap.get()));
    504         ALOGD("hey=%p, heap=%p, count=%d, (fd=%d, base=%p, size=%zu)",
    505                 mHeapCache.keyAt(i).unsafe_get(),
    506                 info.heap.get(), info.count,
    507                 h->mHeapId.load(memory_order_relaxed), h->mBase, h->mSize);
    508     }
    509 }
    510 
    511 
    512 // ---------------------------------------------------------------------------
    513 }; // namespace android
    514