/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "IMemory"

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>

#include <errno.h>
#include <string.h>

#include <sys/types.h>
#include <sys/mman.h>

#include <binder/IMemory.h>
#include <utils/KeyedVector.h>
#include <utils/threads.h>
#include <utils/Atomic.h>
#include <utils/Log.h>
#include <binder/Parcel.h>
#include <utils/CallStack.h>

#define VERBOSE 0

namespace android {
// ---------------------------------------------------------------------------

// Process-wide cache of remote memory heaps, keyed by the heap's binder.
// Each remote heap is mapped at most once per process; entries are
// reference-counted and released when the last client goes away or the
// remote binder dies.
class HeapCache : public IBinder::DeathRecipient
{
public:
    HeapCache();
    virtual ~HeapCache();

    virtual void binderDied(const wp<IBinder>& who);

    sp<IMemoryHeap> find_heap(const sp<IBinder>& binder);
    void free_heap(const sp<IBinder>& binder);
    sp<IMemoryHeap> get_heap(const sp<IBinder>& binder);
    void dump_heaps();

private:
    // For IMemory.cpp
    struct heap_info_t {
        sp<IMemoryHeap> heap;
        int32_t count;
    };

    void free_heap(const wp<IBinder>& binder);

    Mutex mHeapCacheLock;
    KeyedVector< wp<IBinder>, heap_info_t > mHeapCache;
};

static sp<HeapCache> gHeapCache = new HeapCache();

/******************************************************************************/

enum {
    HEAP_ID = IBinder::FIRST_CALL_TRANSACTION
};

class BpMemoryHeap : public BpInterface<IMemoryHeap>
{
public:
    BpMemoryHeap(const sp<IBinder>& impl);
    virtual ~BpMemoryHeap();

    virtual int getHeapID() const;
    virtual void* getBase() const;
    virtual size_t getSize() const;
    virtual uint32_t getFlags() const;
    virtual uint32_t getOffset() const;

private:
    friend class IMemory;
    friend class HeapCache;

    // for debugging in this module
    static inline sp<IMemoryHeap> find_heap(const sp<IBinder>& binder) {
        return gHeapCache->find_heap(binder);
    }
    static inline void free_heap(const sp<IBinder>& binder) {
        gHeapCache->free_heap(binder);
    }
    static inline sp<IMemoryHeap> get_heap(const sp<IBinder>& binder) {
        return gHeapCache->get_heap(binder);
    }
    static inline void dump_heaps() {
        gHeapCache->dump_heaps();
    }

    void assertMapped() const;
    void assertReallyMapped() const;

    mutable volatile int32_t mHeapId;
    mutable void*       mBase;
    mutable size_t      mSize;
    mutable uint32_t    mFlags;
    mutable uint32_t    mOffset;
    mutable bool        mRealHeap;
    mutable Mutex       mLock;
};

// ----------------------------------------------------------------------------

enum {
    GET_MEMORY = IBinder::FIRST_CALL_TRANSACTION
};

class BpMemory : public BpInterface<IMemory>
{
public:
    BpMemory(const sp<IBinder>& impl);
    virtual ~BpMemory();
    virtual sp<IMemoryHeap> getMemory(ssize_t* offset=0, size_t* size=0) const;

private:
    mutable sp<IMemoryHeap> mHeap;
    mutable ssize_t mOffset;
    mutable size_t mSize;
};

/******************************************************************************/

void* IMemory::fastPointer(const sp<IBinder>& binder, ssize_t offset) const
{
    sp<IMemoryHeap> realHeap = BpMemoryHeap::get_heap(binder);
    void* const base = realHeap->base();
    if (base == MAP_FAILED)
        return 0;
    return static_cast<char*>(base) + offset;
}

void* IMemory::pointer() const {
    ssize_t offset;
    sp<IMemoryHeap> heap = getMemory(&offset);
    void* const base = heap != 0 ? heap->base() : MAP_FAILED;
    if (base == MAP_FAILED)
        return 0;
    return static_cast<char*>(base) + offset;
}

size_t IMemory::size() const {
    size_t size;
    getMemory(NULL, &size);
    return size;
}

ssize_t IMemory::offset() const {
    ssize_t offset;
    getMemory(&offset);
    return offset;
}

/******************************************************************************/

BpMemory::BpMemory(const sp<IBinder>& impl)
    : BpInterface<IMemory>(impl), mOffset(0), mSize(0)
{
}

BpMemory::~BpMemory()
{
}

sp<IMemoryHeap> BpMemory::getMemory(ssize_t* offset, size_t* size) const
{
    if (mHeap == 0) {
        Parcel data, reply;
        data.writeInterfaceToken(IMemory::getInterfaceDescriptor());
        if (remote()->transact(GET_MEMORY, data, &reply) == NO_ERROR) {
            sp<IBinder> heap = reply.readStrongBinder();
            ssize_t o = reply.readInt32();
            size_t s = reply.readInt32();
            if (heap != 0) {
                mHeap = interface_cast<IMemoryHeap>(heap);
                if (mHeap != 0) {
                    mOffset = o;
                    mSize = s;
                }
            }
        }
    }
    if (offset) *offset = mOffset;
    if (size) *size = mSize;
    return mHeap;
}

// ---------------------------------------------------------------------------

IMPLEMENT_META_INTERFACE(Memory, "android.utils.IMemory");

BnMemory::BnMemory() {
}

BnMemory::~BnMemory() {
}

status_t BnMemory::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        case GET_MEMORY: {
            CHECK_INTERFACE(IMemory, data, reply);
            ssize_t offset;
            size_t size;
            reply->writeStrongBinder( getMemory(&offset, &size)->asBinder() );
            reply->writeInt32(offset);
            reply->writeInt32(size);
            return NO_ERROR;
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}


/******************************************************************************/

BpMemoryHeap::BpMemoryHeap(const sp<IBinder>& impl)
    : BpInterface<IMemoryHeap>(impl),
      mHeapId(-1), mBase(MAP_FAILED), mSize(0), mFlags(0), mOffset(0), mRealHeap(false)
{
}

BpMemoryHeap::~BpMemoryHeap() {
    if (mHeapId != -1) {
        close(mHeapId);
        if (mRealHeap) {
            // by construction we're the last one
            if (mBase != MAP_FAILED) {
                sp<IBinder> binder = const_cast<BpMemoryHeap*>(this)->asBinder();

                if (VERBOSE) {
                    ALOGD("UNMAPPING binder=%p, heap=%p, size=%zu, fd=%d",
                            binder.get(), this, mSize, mHeapId);
                    CallStack stack(LOG_TAG);
                }

                munmap(mBase, mSize);
            }
        } else {
            // remove from list only if it was mapped before
            sp<IBinder> binder = const_cast<BpMemoryHeap*>(this)->asBinder();
            free_heap(binder);
        }
    }
}

void BpMemoryHeap::assertMapped() const
{
    if (mHeapId == -1) {
        sp<IBinder> binder(const_cast<BpMemoryHeap*>(this)->asBinder());
        sp<BpMemoryHeap> heap(static_cast<BpMemoryHeap*>(find_heap(binder).get()));
        heap->assertReallyMapped();
        if (heap->mBase != MAP_FAILED) {
            Mutex::Autolock _l(mLock);
            if (mHeapId == -1) {
                mBase   = heap->mBase;
                mSize   = heap->mSize;
                mOffset = heap->mOffset;
                android_atomic_write( dup( heap->mHeapId ), &mHeapId );
            }
        } else {
            // something went wrong
            free_heap(binder);
        }
    }
}

void BpMemoryHeap::assertReallyMapped() const
{
    if (mHeapId == -1) {

        // remote call without mLock held: worst-case scenario we end up
        // calling transact() from multiple threads, but that's not a problem,
        // only the mmap below must be in the critical section.

        Parcel data, reply;
        data.writeInterfaceToken(IMemoryHeap::getInterfaceDescriptor());
        status_t err = remote()->transact(HEAP_ID, data, &reply);
        int parcel_fd = reply.readFileDescriptor();
        ssize_t size = reply.readInt32();
        uint32_t flags = reply.readInt32();
        uint32_t offset = reply.readInt32();

        ALOGE_IF(err, "binder=%p transaction failed fd=%d, size=%zd, err=%d (%s)",
                asBinder().get(), parcel_fd, size, err, strerror(-err));

        int fd = dup(parcel_fd);
        ALOGE_IF(fd == -1, "cannot dup fd=%d, size=%zd, err=%d (%s)",
                parcel_fd, size, err, strerror(errno));

        int access = PROT_READ;
        if (!(flags & READ_ONLY)) {
            access |= PROT_WRITE;
        }

        Mutex::Autolock _l(mLock);
        if (mHeapId == -1) {
            mRealHeap = true;
            mBase = mmap(0, size, access, MAP_SHARED, fd, offset);
            if (mBase == MAP_FAILED) {
                ALOGE("cannot map BpMemoryHeap (binder=%p), size=%zd, fd=%d (%s)",
                        asBinder().get(), size, fd, strerror(errno));
                close(fd);
            } else {
                mSize = size;
                mFlags = flags;
                mOffset = offset;
                android_atomic_write(fd, &mHeapId);
            }
        }
    }
}

int BpMemoryHeap::getHeapID() const {
    assertMapped();
    return mHeapId;
}

void* BpMemoryHeap::getBase() const {
    assertMapped();
    return mBase;
}

size_t BpMemoryHeap::getSize() const {
    assertMapped();
    return mSize;
}

uint32_t BpMemoryHeap::getFlags() const {
    assertMapped();
    return mFlags;
}

uint32_t BpMemoryHeap::getOffset() const {
    assertMapped();
    return mOffset;
}

// ---------------------------------------------------------------------------

IMPLEMENT_META_INTERFACE(MemoryHeap, "android.utils.IMemoryHeap");

BnMemoryHeap::BnMemoryHeap() {
}

BnMemoryHeap::~BnMemoryHeap() {
}

status_t BnMemoryHeap::onTransact(
        uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        case HEAP_ID: {
            CHECK_INTERFACE(IMemoryHeap, data, reply);
            reply->writeFileDescriptor(getHeapID());
            reply->writeInt32(getSize());
            reply->writeInt32(getFlags());
            reply->writeInt32(getOffset());
            return NO_ERROR;
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}

/*****************************************************************************/

HeapCache::HeapCache()
    : DeathRecipient()
{
}

HeapCache::~HeapCache()
{
}

void HeapCache::binderDied(const wp<IBinder>& binder)
{
    //ALOGD("binderDied binder=%p", binder.unsafe_get());
    free_heap(binder);
}

sp<IMemoryHeap> HeapCache::find_heap(const sp<IBinder>& binder)
{
    Mutex::Autolock _l(mHeapCacheLock);
    ssize_t i = mHeapCache.indexOfKey(binder);
    if (i >= 0) {
        heap_info_t& info = mHeapCache.editValueAt(i);
        ALOGD_IF(VERBOSE,
                "found binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
                binder.get(), info.heap.get(),
                static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
                static_cast<BpMemoryHeap*>(info.heap.get())->mHeapId,
                info.count);
        android_atomic_inc(&info.count);
        return info.heap;
    } else {
        heap_info_t info;
        info.heap = interface_cast<IMemoryHeap>(binder);
        info.count = 1;
        //ALOGD("adding binder=%p, heap=%p, count=%d",
        //        binder.get(), info.heap.get(), info.count);
        mHeapCache.add(binder, info);
        return info.heap;
    }
}

void HeapCache::free_heap(const sp<IBinder>& binder) {
    free_heap( wp<IBinder>(binder) );
}

void HeapCache::free_heap(const wp<IBinder>& binder)
{
    sp<IMemoryHeap> rel;
    {
        Mutex::Autolock _l(mHeapCacheLock);
        ssize_t i = mHeapCache.indexOfKey(binder);
        if (i >= 0) {
            heap_info_t& info(mHeapCache.editValueAt(i));
            int32_t c = android_atomic_dec(&info.count);
            if (c == 1) {
                ALOGD_IF(VERBOSE,
                        "removing binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
                        binder.unsafe_get(), info.heap.get(),
                        static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
                        static_cast<BpMemoryHeap*>(info.heap.get())->mHeapId,
                        info.count);
                rel = mHeapCache.valueAt(i).heap;
                mHeapCache.removeItemsAt(i);
            }
        } else {
            ALOGE("free_heap binder=%p not found!!!", binder.unsafe_get());
        }
    }
}

sp<IMemoryHeap> HeapCache::get_heap(const sp<IBinder>& binder)
{
    sp<IMemoryHeap> realHeap;
    Mutex::Autolock _l(mHeapCacheLock);
    ssize_t i = mHeapCache.indexOfKey(binder);
    if (i >= 0) realHeap = mHeapCache.valueAt(i).heap;
    else        realHeap = interface_cast<IMemoryHeap>(binder);
    return realHeap;
}

void HeapCache::dump_heaps()
{
    Mutex::Autolock _l(mHeapCacheLock);
    int c = mHeapCache.size();
    for (int i = 0; i < c; i++) {
        const heap_info_t& info = mHeapCache.valueAt(i);
        BpMemoryHeap const* h(static_cast<BpMemoryHeap const*>(info.heap.get()));
        ALOGD("binder=%p, heap=%p, count=%d, (fd=%d, base=%p, size=%zu)",
                mHeapCache.keyAt(i).unsafe_get(),
                info.heap.get(), info.count,
                h->mHeapId, h->mBase, h->mSize);
    }
}


// ---------------------------------------------------------------------------
} // namespace android