/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "C2AllocatorIon"
#include <utils/Log.h>

#include <list>

#include <ion/ion.h>
#include <sys/mman.h>
#include <unistd.h> // getpagesize, size_t, close, dup

#include <C2AllocatorIon.h>
#include <C2Buffer.h>
#include <C2Debug.h>
#include <C2ErrnoUtils.h>

namespace android {

namespace {
    constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;
}

/* size_t <=> int(lo), int(hi) conversions */
constexpr inline int size2intLo(size_t s) {
    return int(s & 0xFFFFFFFF);
}

constexpr inline int size2intHi(size_t s) {
    // cast to uint64_t as size_t may be 32 bits wide
    return int((uint64_t(s) >> 32) & 0xFFFFFFFF);
}

constexpr inline size_t ints2size(int intLo, int intHi) {
    // convert in 2 stages to 64 bits as intHi may be negative
    return size_t(unsigned(intLo)) | size_t(uint64_t(unsigned(intHi)) << 32);
}

/* ========================================= ION HANDLE ======================================== */
/**
 * ION handle
 *
 * There can be only a single ion client per process; this is captured in the ion fd that is
 * passed to the constructor, but it should be managed by the ion buffer allocator/mapper.
 *
 * ion uses ion_user_handle_t for buffers. We don't store this in the native handle as
 * it requires an ion_free to decref. Instead, we share the buffer to get an fd that also holds
 * a refcount.
 *
 * This handle will not capture mapped fd-s as updating that would require a global mutex.
 */

struct C2HandleIon : public C2Handle {
    // ion handle owns ionFd(!) and bufferFd
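    // The 64-bit size is stored as two 32-bit ints (see size2intLo/size2intHi above) so that
    // it fits the native handle's int array; size() below reassembles it the same way as
    // ints2size.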
    C2HandleIon(int bufferFd, size_t size)
        : C2Handle(cHeader),
          mFds{ bufferFd },
          mInts{ int(size & 0xFFFFFFFF), int((uint64_t(size) >> 32) & 0xFFFFFFFF), kMagic } { }

    static bool isValid(const C2Handle * const o);

    int bufferFd() const { return mFds.mBuffer; }
    size_t size() const {
        return size_t(unsigned(mInts.mSizeLo))
               | size_t(uint64_t(unsigned(mInts.mSizeHi)) << 32);
    }

protected:
    struct {
        int mBuffer;   // shared ion buffer
    } mFds;
    struct {
        int mSizeLo;   // low 32-bits of size
        int mSizeHi;   // high 32-bits of size
        int mMagic;
    } mInts;

private:
    typedef C2HandleIon _type;
    enum {
        kMagic = '\xc2io\x00',
        numFds = sizeof(mFds) / sizeof(int),
        numInts = sizeof(mInts) / sizeof(int),
        version = sizeof(C2Handle)
    };
    //constexpr static C2Handle cHeader = { version, numFds, numInts, {} };
    const static C2Handle cHeader;
};

const C2Handle C2HandleIon::cHeader = {
    C2HandleIon::version,
    C2HandleIon::numFds,
    C2HandleIon::numInts,
    {}
};

// static
bool C2HandleIon::isValid(const C2Handle * const o) {
    if (!o || memcmp(o, &cHeader, sizeof(cHeader))) {
        return false;
    }
    const C2HandleIon *other = static_cast<const C2HandleIon*>(o);
    return other->mInts.mMagic == kMagic;
}

// TODO: is the dup of an ion fd identical to ion_share?

/* ======================================= ION ALLOCATION ====================================== */
class C2AllocationIon : public C2LinearAllocation {
public:
    /* Interface methods */
    virtual c2_status_t map(
        size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence,
        void **addr /* nonnull */) override;
    virtual c2_status_t unmap(void *addr, size_t size, C2Fence *fenceFd) override;
    virtual ~C2AllocationIon() override;
    virtual const C2Handle *handle() const override;
    virtual id_t getAllocatorId() const override;
    virtual bool equals(const std::shared_ptr<C2LinearAllocation> &other) const override;

    // internal methods
    C2AllocationIon(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags, C2Allocator::id_t id);
    C2AllocationIon(int ionFd, size_t size, int shareFd, C2Allocator::id_t id);

    c2_status_t status() const;

protected:
    class Impl;
    class ImplV2;
    Impl *mImpl;

    // TODO: we could make this encapsulate shared_ptr and copiable
    C2_DO_NOT_COPY(C2AllocationIon);
};

class C2AllocationIon::Impl {
protected:
    /**
     * Constructs an ion allocation.
     *
     * \note We always create an ion allocation, even if the allocation or import fails
     * so that we can capture the error.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param capacity  size of allocation
     * \param bufferFd  buffer handle (ownership transferred to created object). Must be
     *                  invalid if err is not 0.
     * \param buffer    ion buffer user handle (ownership transferred to created object). Must be
     *                  invalid if err is not 0.
     * \param err       errno during buffer allocation or import
     */
    Impl(int ionFd, size_t capacity, int bufferFd, ion_user_handle_t buffer, C2Allocator::id_t id, int err)
        : mIonFd(ionFd),
          mHandle(bufferFd, capacity),
          mBuffer(buffer),
          mId(id),
          mInit(c2_map_errno<ENOMEM, EACCES, EINVAL>(err)),
          mMapFd(-1) {
        if (mInit != C2_OK) {
            // close ionFd now on error
            if (mIonFd >= 0) {
                close(mIonFd);
                mIonFd = -1;
            }
            // C2_CHECK(bufferFd < 0);
            // C2_CHECK(buffer < 0);
        }
    }

public:
    /**
     * Constructs an ion allocation by importing a shared buffer fd.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param capacity  size of allocation
     * \param bufferFd  buffer handle (ownership transferred to created object)
     *
     * \return created ion allocation (implementation) which may be invalid if the
     * import failed.
     */
    static Impl *Import(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id);

    /**
     * Constructs an ion allocation by allocating an ion buffer.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param size      size of allocation
     * \param align     desired alignment of allocation
     * \param heapMask  mask of heaps considered
     * \param flags     ion allocation flags
     *
     * \return created ion allocation (implementation) which may be invalid if the
     * allocation failed.
     */
    static Impl *Alloc(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags, C2Allocator::id_t id);

    c2_status_t map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence, void **addr) {
        (void)fence; // TODO: wait for fence
        *addr = nullptr;
        if (!mMappings.empty()) {
            ALOGV("multiple map");
            // TODO: technically we should return DUPLICATE here, but our block views don't
            // actually unmap, so we end up remapping an ion buffer multiple times.
            //
            // return C2_DUPLICATE;
        }
        if (size == 0) {
            return C2_BAD_VALUE;
        }

        int prot = PROT_NONE;
        int flags = MAP_SHARED;
        if (usage.expected & C2MemoryUsage::CPU_READ) {
            prot |= PROT_READ;
        }
        if (usage.expected & C2MemoryUsage::CPU_WRITE) {
            prot |= PROT_WRITE;
        }

        size_t alignmentBytes = offset % PAGE_SIZE;
        size_t mapOffset = offset - alignmentBytes;
        size_t mapSize = size + alignmentBytes;
        Mapping map = { nullptr, alignmentBytes, mapSize };

        c2_status_t err = mapInternal(mapSize, mapOffset, alignmentBytes, prot, flags, &(map.addr), addr);
        if (map.addr) {
            mMappings.push_back(map);
        }
        return err;
    }

    c2_status_t unmap(void *addr, size_t size, C2Fence *fence) {
        if (mMappings.empty()) {
            ALOGD("tried to unmap unmapped buffer");
            return C2_NOT_FOUND;
        }
        for (auto it = mMappings.begin(); it != mMappings.end(); ++it) {
            if (addr != (uint8_t *)it->addr + it->alignmentBytes ||
                    size + it->alignmentBytes != it->size) {
                continue;
            }
            int err = munmap(it->addr, it->size);
            if (err != 0) {
                ALOGD("munmap failed");
                return c2_map_errno<EINVAL>(errno);
            }
            if (fence) {
                *fence = C2Fence(); // not using fences
            }
            (void)mMappings.erase(it);
            ALOGV("successfully unmapped: %d", mHandle.bufferFd());
            return C2_OK;
        }
        ALOGD("unmap failed to find specified map");
        return C2_BAD_VALUE;
    }

    virtual ~Impl() {
        if (!mMappings.empty()) {
            ALOGD("Dangling mappings!");
            for (const Mapping &map : mMappings) {
                (void)munmap(map.addr, map.size);
            }
        }
        if (mMapFd >= 0) {
            close(mMapFd);
            mMapFd = -1;
        }
        if (mInit == C2_OK) {
            if (mBuffer >= 0) {
                (void)ion_free(mIonFd, mBuffer);
            }
            native_handle_close(&mHandle);
        }
        if (mIonFd >= 0) {
            close(mIonFd);
        }
    }

    c2_status_t status() const {
        return mInit;
    }

    const C2Handle *handle() const {
        return &mHandle;
    }

    C2Allocator::id_t getAllocatorId() const {
        return mId;
    }

    virtual ion_user_handle_t ionHandle() const {
        return mBuffer;
    }

protected:
    virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
            int prot, int flags, void** base, void** addr) {
        c2_status_t err = C2_OK;
        if (mMapFd == -1) {
            int ret = ion_map(mIonFd, mBuffer, mapSize, prot,
                              flags, mapOffset, (unsigned char**)base, &mMapFd);
            ALOGV("ion_map(ionFd = %d, handle = %d, size = %zu, prot = %d, flags = %d, "
                  "offset = %zu) returned (%d)",
                  mIonFd, mBuffer, mapSize, prot, flags, mapOffset, ret);
            if (ret) {
                mMapFd = -1;
                *base = *addr = nullptr;
                err = c2_map_errno<EINVAL>(-ret);
            } else {
                *addr = (uint8_t *)*base + alignmentBytes;
            }
        } else {
            *base = mmap(nullptr, mapSize, prot, flags, mMapFd, mapOffset);
            ALOGV("mmap(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
                  "returned (%d)",
                  mapSize, prot, flags, mMapFd, mapOffset, errno);
            if (*base == MAP_FAILED) {
                *base = *addr = nullptr;
                err = c2_map_errno<EINVAL>(errno);
            } else {
                *addr = (uint8_t *)*base + alignmentBytes;
            }
        }
        return err;
    }

    int mIonFd;
    C2HandleIon mHandle;
    ion_user_handle_t mBuffer;
    C2Allocator::id_t mId;
    c2_status_t mInit;
    int mMapFd; // only one for now
    struct Mapping {
        void *addr;
        size_t alignmentBytes;
        size_t size;
    };
    std::list<Mapping> mMappings;
};

class C2AllocationIon::ImplV2 : public C2AllocationIon::Impl {
public:
    /**
     * Constructs an ion allocation for platforms with the new (ion_4.12.h) API.
     *
     * \note We always create an ion allocation, even if the allocation or import fails
     * so that we can capture the error.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param capacity  size of allocation
     * \param bufferFd  buffer handle (ownership transferred to created object). Must be
     *                  invalid if err is not 0.
     * \param err       errno during buffer allocation or import
     */
    ImplV2(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id, int err)
        : Impl(ionFd, capacity, bufferFd, -1 /*buffer*/, id, err) {
    }

    virtual ~ImplV2() = default;

    virtual ion_user_handle_t ionHandle() const {
        return mHandle.bufferFd();
    }

protected:
    virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
            int prot, int flags, void** base, void** addr) {
        c2_status_t err = C2_OK;
        *base = mmap(nullptr, mapSize, prot, flags, mHandle.bufferFd(), mapOffset);
        ALOGV("mmapV2(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
              "returned (%d)",
              mapSize, prot, flags, mHandle.bufferFd(), mapOffset, errno);
        if (*base == MAP_FAILED) {
            *base = *addr = nullptr;
            err = c2_map_errno<EINVAL>(errno);
        } else {
            *addr = (uint8_t *)*base + alignmentBytes;
        }
        return err;
    }
};

C2AllocationIon::Impl *C2AllocationIon::Impl::Import(int ionFd, size_t capacity, int bufferFd,
        C2Allocator::id_t id) {
    int ret = 0;
    if (ion_is_legacy(ionFd)) {
        ion_user_handle_t buffer = -1;
        ret = ion_import(ionFd, bufferFd, &buffer);
        return new Impl(ionFd, capacity, bufferFd, buffer, id, ret);
    } else {
        return new ImplV2(ionFd, capacity, bufferFd, id, ret);
    }
}

C2AllocationIon::Impl *C2AllocationIon::Impl::Alloc(int ionFd, size_t size, size_t align,
        unsigned heapMask, unsigned flags, C2Allocator::id_t id) {
    int bufferFd = -1;
    ion_user_handle_t buffer = -1;
    size_t alignedSize = align == 0 ? size : (size + align - 1) & ~(align - 1);
    int ret;

    if (ion_is_legacy(ionFd)) {
        ret = ion_alloc(ionFd, alignedSize, align, heapMask, flags, &buffer);
        ALOGV("ion_alloc(ionFd = %d, size = %zu, align = %zu, heapMask = %d, flags = %d) "
              "returned (%d) ; buffer = %d",
              ionFd, alignedSize, align, heapMask, flags, ret, buffer);
        if (ret == 0) {
            // get buffer fd for native handle constructor
            ret = ion_share(ionFd, buffer, &bufferFd);
            if (ret != 0) {
                ion_free(ionFd, buffer);
                buffer = -1;
            }
        }
        return new Impl(ionFd, alignedSize, bufferFd, buffer, id, ret);

    } else {
        ret = ion_alloc_fd(ionFd, alignedSize, align, heapMask, flags, &bufferFd);
        ALOGV("ion_alloc_fd(ionFd = %d, size = %zu, align = %zu, heapMask = %d, flags = %d) "
              "returned (%d) ; bufferFd = %d",
              ionFd, alignedSize, align, heapMask, flags, ret, bufferFd);

        return new ImplV2(ionFd, alignedSize, bufferFd, id, ret);
    }
}

c2_status_t C2AllocationIon::map(
        size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence, void **addr) {
    return mImpl->map(offset, size, usage, fence, addr);
}

c2_status_t C2AllocationIon::unmap(void *addr, size_t size, C2Fence *fence) {
    return mImpl->unmap(addr, size, fence);
}

c2_status_t C2AllocationIon::status() const {
    return mImpl->status();
}

C2Allocator::id_t C2AllocationIon::getAllocatorId() const {
    return mImpl->getAllocatorId();
}

bool C2AllocationIon::equals(const std::shared_ptr<C2LinearAllocation> &other) const {
    if (!other || other->getAllocatorId() != getAllocatorId()) {
        return false;
    }
    // get user handle to compare objects
    std::shared_ptr<C2AllocationIon> otherAsIon = std::static_pointer_cast<C2AllocationIon>(other);
    return mImpl->ionHandle() == otherAsIon->mImpl->ionHandle();
}

const C2Handle *C2AllocationIon::handle() const {
    return mImpl->handle();
}

C2AllocationIon::~C2AllocationIon() {
    delete mImpl;
}

C2AllocationIon::C2AllocationIon(int ionFd, size_t size, size_t align,
        unsigned heapMask, unsigned flags, C2Allocator::id_t id)
    : C2LinearAllocation(size),
      mImpl(Impl::Alloc(ionFd, size, align, heapMask, flags, id)) { }

C2AllocationIon::C2AllocationIon(int ionFd, size_t size, int shareFd, C2Allocator::id_t id)
    : C2LinearAllocation(size),
      mImpl(Impl::Import(ionFd, size, shareFd, id)) { }

/* ======================================= ION ALLOCATOR ====================================== */
C2AllocatorIon::C2AllocatorIon(id_t id)
    : mInit(C2_OK),
      mIonFd(ion_open()) {
    if (mIonFd < 0) {
        switch (errno) {
        case ENOENT: mInit = C2_OMITTED; break;
        default: mInit = c2_map_errno<EACCES>(errno); break;
        }
    } else {
        C2MemoryUsage minUsage = { 0, 0 };
        C2MemoryUsage maxUsage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
        Traits traits = { "android.allocator.ion", id, LINEAR, minUsage, maxUsage };
        mTraits = std::make_shared<Traits>(traits);
        mBlockSize = ::getpagesize();
    }
}

C2AllocatorIon::~C2AllocatorIon() {
    if (mInit == C2_OK) {
        ion_close(mIonFd);
    }
}

C2Allocator::id_t C2AllocatorIon::getId() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->id;
}

C2String C2AllocatorIon::getName() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->name;
}

std::shared_ptr<const C2Allocator::Traits> C2AllocatorIon::getTraits() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits;
}

void C2AllocatorIon::setUsageMapper(
        const UsageMapperFn &mapper, uint64_t minUsage, uint64_t maxUsage, uint64_t blockSize) {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    mUsageMapperCache.clear();
    mUsageMapperLru.clear();
    mUsageMapper = mapper;
    Traits traits = {
        mTraits->name, mTraits->id, LINEAR,
        C2MemoryUsage(minUsage), C2MemoryUsage(maxUsage)
    };
    mTraits = std::make_shared<Traits>(traits);
    mBlockSize = blockSize;
}

std::size_t C2AllocatorIon::MapperKeyHash::operator()(const MapperKey &k) const {
    return std::hash<uint64_t>{}(k.first) ^ std::hash<size_t>{}(k.second);
}

c2_status_t C2AllocatorIon::mapUsage(
        C2MemoryUsage usage, size_t capacity, size_t *align, unsigned *heapMask, unsigned *flags) {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    c2_status_t res = C2_OK;
    // align capacity
    capacity = (capacity + mBlockSize - 1) & ~(mBlockSize - 1);
    MapperKey key = std::make_pair(usage.expected, capacity);
    auto entry = mUsageMapperCache.find(key);
    if (entry == mUsageMapperCache.end()) {
        if (mUsageMapper) {
            res = mUsageMapper(usage, capacity, align, heapMask, flags);
        } else {
            *align = 0; // TODO make this 1
            *heapMask = ~0; // default mask
            if (usage.expected & (C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE)) {
                *flags = ION_FLAG_CACHED; // cache CPU accessed buffers
            } else {
                *flags = 0; // default flags
            }
            res = C2_NO_INIT;
        }
        // add usage to cache as the most recently used entry
        MapperValue value = std::make_tuple(*align, *heapMask, *flags, res);
        mUsageMapperLru.emplace_front(key, value);
        mUsageMapperCache.emplace(std::make_pair(key, mUsageMapperLru.begin()));
        if (mUsageMapperCache.size() > USAGE_LRU_CACHE_SIZE) {
            // remove least recently used entry (back of the LRU list)
            MapperKey lruKey = mUsageMapperLru.back().first;
            mUsageMapperCache.erase(lruKey);
            mUsageMapperLru.pop_back();
        }
    } else {
        // move entry to MRU
        mUsageMapperLru.splice(mUsageMapperLru.begin(), mUsageMapperLru, entry->second);
        const MapperValue &value = entry->second->second;
        std::tie(*align, *heapMask, *flags, res) = value;
    }
    return res;
}

c2_status_t C2AllocatorIon::newLinearAllocation(
        uint32_t capacity, C2MemoryUsage usage, std::shared_ptr<C2LinearAllocation> *allocation) {
    if (allocation == nullptr) {
        return C2_BAD_VALUE;
    }

    allocation->reset();
    if (mInit != C2_OK) {
        return mInit;
    }

    size_t align = 0;
    unsigned heapMask = ~0;
    unsigned flags = 0;
    c2_status_t ret = mapUsage(usage, capacity, &align, &heapMask, &flags);
    if (ret && ret != C2_NO_INIT) {
        return ret;
    }

    std::shared_ptr<C2AllocationIon> alloc
        = std::make_shared<C2AllocationIon>(dup(mIonFd), capacity, align, heapMask, flags, mTraits->id);
    ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
    }
    return ret;
}

c2_status_t C2AllocatorIon::priorLinearAllocation(
        const C2Handle *handle, std::shared_ptr<C2LinearAllocation> *allocation) {
    *allocation = nullptr;
    if (mInit != C2_OK) {
        return mInit;
    }

    if (!C2HandleIon::isValid(handle)) {
        return C2_BAD_VALUE;
    }

    // TODO: get capacity and validate it
    const C2HandleIon *h = static_cast<const C2HandleIon*>(handle);
    std::shared_ptr<C2AllocationIon> alloc
        = std::make_shared<C2AllocationIon>(dup(mIonFd), h->size(), h->bufferFd(), mTraits->id);
    c2_status_t ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
        native_handle_delete(const_cast<native_handle_t*>(
                reinterpret_cast<const native_handle_t*>(handle)));
    }
    return ret;
}

bool C2AllocatorIon::isValid(const C2Handle* const o) {
    return C2HandleIon::isValid(o);
}

} // namespace android