1 /* 2 * Copyright (C) 2005 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #define LOG_TAG "Parcel" 18 //#define LOG_NDEBUG 0 19 20 #include <errno.h> 21 #include <fcntl.h> 22 #include <inttypes.h> 23 #include <pthread.h> 24 #include <stdint.h> 25 #include <stdio.h> 26 #include <stdlib.h> 27 #include <sys/mman.h> 28 #include <sys/stat.h> 29 #include <sys/types.h> 30 #include <sys/resource.h> 31 #include <unistd.h> 32 33 #include <binder/Binder.h> 34 #include <binder/BpBinder.h> 35 #include <binder/IPCThreadState.h> 36 #include <binder/Parcel.h> 37 #include <binder/ProcessState.h> 38 #include <binder/Status.h> 39 #include <binder/TextOutput.h> 40 #include <binder/Value.h> 41 42 #include <cutils/ashmem.h> 43 #include <utils/Debug.h> 44 #include <utils/Flattenable.h> 45 #include <utils/Log.h> 46 #include <utils/misc.h> 47 #include <utils/String8.h> 48 #include <utils/String16.h> 49 50 #include <private/binder/binder_module.h> 51 #include <private/binder/Static.h> 52 53 #ifndef INT32_MAX 54 #define INT32_MAX ((int32_t)(2147483647)) 55 #endif 56 57 #define LOG_REFS(...) 58 //#define LOG_REFS(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__) 59 #define LOG_ALLOC(...) 60 //#define LOG_ALLOC(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__) 61 62 // --------------------------------------------------------------------------- 63 64 // This macro should never be used at runtime, as a too large value 65 // of s could cause an integer overflow. Instead, you should always 66 // use the wrapper function pad_size() 67 #define PAD_SIZE_UNSAFE(s) (((s)+3)&~3) 68 69 static size_t pad_size(size_t s) { 70 if (s > (SIZE_T_MAX - 3)) { 71 abort(); 72 } 73 return PAD_SIZE_UNSAFE(s); 74 } 75 76 // Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER 77 #define STRICT_MODE_PENALTY_GATHER (0x40 << 16) 78 79 // XXX This can be made public if we want to provide 80 // support for typed data. 81 struct small_flat_data 82 { 83 uint32_t type; 84 uint32_t data; 85 }; 86 87 namespace android { 88 89 static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER; 90 static size_t gParcelGlobalAllocSize = 0; 91 static size_t gParcelGlobalAllocCount = 0; 92 93 static size_t gMaxFds = 0; 94 95 // Maximum size of a blob to transfer in-place. 96 static const size_t BLOB_INPLACE_LIMIT = 16 * 1024; 97 98 enum { 99 BLOB_INPLACE = 0, 100 BLOB_ASHMEM_IMMUTABLE = 1, 101 BLOB_ASHMEM_MUTABLE = 2, 102 }; 103 104 void acquire_object(const sp<ProcessState>& proc, 105 const flat_binder_object& obj, const void* who, size_t* outAshmemSize) 106 { 107 switch (obj.type) { 108 case BINDER_TYPE_BINDER: 109 if (obj.binder) { 110 LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie); 111 reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who); 112 } 113 return; 114 case BINDER_TYPE_WEAK_BINDER: 115 if (obj.binder) 116 reinterpret_cast<RefBase::weakref_type*>(obj.binder)->incWeak(who); 117 return; 118 case BINDER_TYPE_HANDLE: { 119 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle); 120 if (b != NULL) { 121 LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get()); 122 b->incStrong(who); 123 } 124 return; 125 } 126 case BINDER_TYPE_WEAK_HANDLE: { 127 const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle); 128 if (b != NULL) b.get_refs()->incWeak(who); 129 return; 130 } 131 case BINDER_TYPE_FD: { 132 if ((obj.cookie != 0) && (outAshmemSize != NULL) && ashmem_valid(obj.handle)) { 133 // If we own an ashmem fd, keep track of how much memory it refers to. 134 int size = ashmem_get_size_region(obj.handle); 135 if (size > 0) { 136 *outAshmemSize += size; 137 } 138 } 139 return; 140 } 141 } 142 143 ALOGD("Invalid object type 0x%08x", obj.type); 144 } 145 146 void acquire_object(const sp<ProcessState>& proc, 147 const flat_binder_object& obj, const void* who) 148 { 149 acquire_object(proc, obj, who, NULL); 150 } 151 152 static void release_object(const sp<ProcessState>& proc, 153 const flat_binder_object& obj, const void* who, size_t* outAshmemSize) 154 { 155 switch (obj.type) { 156 case BINDER_TYPE_BINDER: 157 if (obj.binder) { 158 LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie); 159 reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who); 160 } 161 return; 162 case BINDER_TYPE_WEAK_BINDER: 163 if (obj.binder) 164 reinterpret_cast<RefBase::weakref_type*>(obj.binder)->decWeak(who); 165 return; 166 case BINDER_TYPE_HANDLE: { 167 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle); 168 if (b != NULL) { 169 LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get()); 170 b->decStrong(who); 171 } 172 return; 173 } 174 case BINDER_TYPE_WEAK_HANDLE: { 175 const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle); 176 if (b != NULL) b.get_refs()->decWeak(who); 177 return; 178 } 179 case BINDER_TYPE_FD: { 180 if (obj.cookie != 0) { // owned 181 if ((outAshmemSize != NULL) && ashmem_valid(obj.handle)) { 182 int size = ashmem_get_size_region(obj.handle); 183 if (size > 0) { 184 *outAshmemSize -= size; 185 } 186 } 187 188 close(obj.handle); 189 } 190 return; 191 } 192 } 193 194 ALOGE("Invalid object type 0x%08x", obj.type); 195 } 196 197 void release_object(const sp<ProcessState>& proc, 198 const flat_binder_object& obj, const void* who) 199 { 200 release_object(proc, obj, who, NULL); 201 } 202 203 inline static status_t finish_flatten_binder( 204 const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out) 205 { 206 return out->writeObject(flat, false); 207 } 208 209 status_t flatten_binder(const sp<ProcessState>& /*proc*/, 210 const sp<IBinder>& binder, Parcel* out) 211 { 212 flat_binder_object obj; 213 214 if (IPCThreadState::self()->backgroundSchedulingDisabled()) { 215 /* minimum priority for all nodes is nice 0 */ 216 obj.flags = FLAT_BINDER_FLAG_ACCEPTS_FDS; 217 } else { 218 /* minimum priority for all nodes is MAX_NICE(19) */ 219 obj.flags = 0x13 | FLAT_BINDER_FLAG_ACCEPTS_FDS; 220 } 221 222 if (binder != NULL) { 223 IBinder *local = binder->localBinder(); 224 if (!local) { 225 BpBinder *proxy = binder->remoteBinder(); 226 if (proxy == NULL) { 227 ALOGE("null proxy"); 228 } 229 const int32_t handle = proxy ? proxy->handle() : 0; 230 obj.type = BINDER_TYPE_HANDLE; 231 obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */ 232 obj.handle = handle; 233 obj.cookie = 0; 234 } else { 235 obj.type = BINDER_TYPE_BINDER; 236 obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs()); 237 obj.cookie = reinterpret_cast<uintptr_t>(local); 238 } 239 } else { 240 obj.type = BINDER_TYPE_BINDER; 241 obj.binder = 0; 242 obj.cookie = 0; 243 } 244 245 return finish_flatten_binder(binder, obj, out); 246 } 247 248 status_t flatten_binder(const sp<ProcessState>& /*proc*/, 249 const wp<IBinder>& binder, Parcel* out) 250 { 251 flat_binder_object obj; 252 253 obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS; 254 if (binder != NULL) { 255 sp<IBinder> real = binder.promote(); 256 if (real != NULL) { 257 IBinder *local = real->localBinder(); 258 if (!local) { 259 BpBinder *proxy = real->remoteBinder(); 260 if (proxy == NULL) { 261 ALOGE("null proxy"); 262 } 263 const int32_t handle = proxy ? proxy->handle() : 0; 264 obj.type = BINDER_TYPE_WEAK_HANDLE; 265 obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */ 266 obj.handle = handle; 267 obj.cookie = 0; 268 } else { 269 obj.type = BINDER_TYPE_WEAK_BINDER; 270 obj.binder = reinterpret_cast<uintptr_t>(binder.get_refs()); 271 obj.cookie = reinterpret_cast<uintptr_t>(binder.unsafe_get()); 272 } 273 return finish_flatten_binder(real, obj, out); 274 } 275 276 // XXX How to deal? In order to flatten the given binder, 277 // we need to probe it for information, which requires a primary 278 // reference... but we don't have one. 279 // 280 // The OpenBinder implementation uses a dynamic_cast<> here, 281 // but we can't do that with the different reference counting 282 // implementation we are using. 283 ALOGE("Unable to unflatten Binder weak reference!"); 284 obj.type = BINDER_TYPE_BINDER; 285 obj.binder = 0; 286 obj.cookie = 0; 287 return finish_flatten_binder(NULL, obj, out); 288 289 } else { 290 obj.type = BINDER_TYPE_BINDER; 291 obj.binder = 0; 292 obj.cookie = 0; 293 return finish_flatten_binder(NULL, obj, out); 294 } 295 } 296 297 inline static status_t finish_unflatten_binder( 298 BpBinder* /*proxy*/, const flat_binder_object& /*flat*/, 299 const Parcel& /*in*/) 300 { 301 return NO_ERROR; 302 } 303 304 status_t unflatten_binder(const sp<ProcessState>& proc, 305 const Parcel& in, sp<IBinder>* out) 306 { 307 const flat_binder_object* flat = in.readObject(false); 308 309 if (flat) { 310 switch (flat->type) { 311 case BINDER_TYPE_BINDER: 312 *out = reinterpret_cast<IBinder*>(flat->cookie); 313 return finish_unflatten_binder(NULL, *flat, in); 314 case BINDER_TYPE_HANDLE: 315 *out = proc->getStrongProxyForHandle(flat->handle); 316 return finish_unflatten_binder( 317 static_cast<BpBinder*>(out->get()), *flat, in); 318 } 319 } 320 return BAD_TYPE; 321 } 322 323 status_t unflatten_binder(const sp<ProcessState>& proc, 324 const Parcel& in, wp<IBinder>* out) 325 { 326 const flat_binder_object* flat = in.readObject(false); 327 328 if (flat) { 329 switch (flat->type) { 330 case BINDER_TYPE_BINDER: 331 *out = reinterpret_cast<IBinder*>(flat->cookie); 332 return finish_unflatten_binder(NULL, *flat, in); 333 case BINDER_TYPE_WEAK_BINDER: 334 if (flat->binder != 0) { 335 out->set_object_and_refs( 336 reinterpret_cast<IBinder*>(flat->cookie), 337 reinterpret_cast<RefBase::weakref_type*>(flat->binder)); 338 } else { 339 *out = NULL; 340 } 341 return finish_unflatten_binder(NULL, *flat, in); 342 case BINDER_TYPE_HANDLE: 343 case BINDER_TYPE_WEAK_HANDLE: 344 *out = proc->getWeakProxyForHandle(flat->handle); 345 return finish_unflatten_binder( 346 static_cast<BpBinder*>(out->unsafe_get()), *flat, in); 347 } 348 } 349 return BAD_TYPE; 350 } 351 352 // --------------------------------------------------------------------------- 353 354 Parcel::Parcel() 355 { 356 LOG_ALLOC("Parcel %p: constructing", this); 357 initState(); 358 } 359 360 Parcel::~Parcel() 361 { 362 freeDataNoInit(); 363 LOG_ALLOC("Parcel %p: destroyed", this); 364 } 365 366 size_t Parcel::getGlobalAllocSize() { 367 pthread_mutex_lock(&gParcelGlobalAllocSizeLock); 368 size_t size = gParcelGlobalAllocSize; 369 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); 370 return size; 371 } 372 373 size_t Parcel::getGlobalAllocCount() { 374 pthread_mutex_lock(&gParcelGlobalAllocSizeLock); 375 size_t count = gParcelGlobalAllocCount; 376 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); 377 return count; 378 } 379 380 const uint8_t* Parcel::data() const 381 { 382 return mData; 383 } 384 385 size_t Parcel::dataSize() const 386 { 387 return (mDataSize > mDataPos ? mDataSize : mDataPos); 388 } 389 390 size_t Parcel::dataAvail() const 391 { 392 size_t result = dataSize() - dataPosition(); 393 if (result > INT32_MAX) { 394 abort(); 395 } 396 return result; 397 } 398 399 size_t Parcel::dataPosition() const 400 { 401 return mDataPos; 402 } 403 404 size_t Parcel::dataCapacity() const 405 { 406 return mDataCapacity; 407 } 408 409 status_t Parcel::setDataSize(size_t size) 410 { 411 if (size > INT32_MAX) { 412 // don't accept size_t values which may have come from an 413 // inadvertent conversion from a negative int. 414 return BAD_VALUE; 415 } 416 417 status_t err; 418 err = continueWrite(size); 419 if (err == NO_ERROR) { 420 mDataSize = size; 421 ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize); 422 } 423 return err; 424 } 425 426 void Parcel::setDataPosition(size_t pos) const 427 { 428 if (pos > INT32_MAX) { 429 // don't accept size_t values which may have come from an 430 // inadvertent conversion from a negative int. 431 abort(); 432 } 433 434 mDataPos = pos; 435 mNextObjectHint = 0; 436 } 437 438 status_t Parcel::setDataCapacity(size_t size) 439 { 440 if (size > INT32_MAX) { 441 // don't accept size_t values which may have come from an 442 // inadvertent conversion from a negative int. 443 return BAD_VALUE; 444 } 445 446 if (size > mDataCapacity) return continueWrite(size); 447 return NO_ERROR; 448 } 449 450 status_t Parcel::setData(const uint8_t* buffer, size_t len) 451 { 452 if (len > INT32_MAX) { 453 // don't accept size_t values which may have come from an 454 // inadvertent conversion from a negative int. 455 return BAD_VALUE; 456 } 457 458 status_t err = restartWrite(len); 459 if (err == NO_ERROR) { 460 memcpy(const_cast<uint8_t*>(data()), buffer, len); 461 mDataSize = len; 462 mFdsKnown = false; 463 } 464 return err; 465 } 466 467 status_t Parcel::appendFrom(const Parcel *parcel, size_t offset, size_t len) 468 { 469 const sp<ProcessState> proc(ProcessState::self()); 470 status_t err; 471 const uint8_t *data = parcel->mData; 472 const binder_size_t *objects = parcel->mObjects; 473 size_t size = parcel->mObjectsSize; 474 int startPos = mDataPos; 475 int firstIndex = -1, lastIndex = -2; 476 477 if (len == 0) { 478 return NO_ERROR; 479 } 480 481 if (len > INT32_MAX) { 482 // don't accept size_t values which may have come from an 483 // inadvertent conversion from a negative int. 484 return BAD_VALUE; 485 } 486 487 // range checks against the source parcel size 488 if ((offset > parcel->mDataSize) 489 || (len > parcel->mDataSize) 490 || (offset + len > parcel->mDataSize)) { 491 return BAD_VALUE; 492 } 493 494 // Count objects in range 495 for (int i = 0; i < (int) size; i++) { 496 size_t off = objects[i]; 497 if ((off >= offset) && (off + sizeof(flat_binder_object) <= offset + len)) { 498 if (firstIndex == -1) { 499 firstIndex = i; 500 } 501 lastIndex = i; 502 } 503 } 504 int numObjects = lastIndex - firstIndex + 1; 505 506 if ((mDataSize+len) > mDataCapacity) { 507 // grow data 508 err = growData(len); 509 if (err != NO_ERROR) { 510 return err; 511 } 512 } 513 514 // append data 515 memcpy(mData + mDataPos, data + offset, len); 516 mDataPos += len; 517 mDataSize += len; 518 519 err = NO_ERROR; 520 521 if (numObjects > 0) { 522 // grow objects 523 if (mObjectsCapacity < mObjectsSize + numObjects) { 524 size_t newSize = ((mObjectsSize + numObjects)*3)/2; 525 if (newSize*sizeof(binder_size_t) < mObjectsSize) return NO_MEMORY; // overflow 526 binder_size_t *objects = 527 (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t)); 528 if (objects == (binder_size_t*)0) { 529 return NO_MEMORY; 530 } 531 mObjects = objects; 532 mObjectsCapacity = newSize; 533 } 534 535 // append and acquire objects 536 int idx = mObjectsSize; 537 for (int i = firstIndex; i <= lastIndex; i++) { 538 size_t off = objects[i] - offset + startPos; 539 mObjects[idx++] = off; 540 mObjectsSize++; 541 542 flat_binder_object* flat 543 = reinterpret_cast<flat_binder_object*>(mData + off); 544 acquire_object(proc, *flat, this, &mOpenAshmemSize); 545 546 if (flat->type == BINDER_TYPE_FD) { 547 // If this is a file descriptor, we need to dup it so the 548 // new Parcel now owns its own fd, and can declare that we 549 // officially know we have fds. 550 flat->handle = fcntl(flat->handle, F_DUPFD_CLOEXEC, 0); 551 flat->cookie = 1; 552 mHasFds = mFdsKnown = true; 553 if (!mAllowFds) { 554 err = FDS_NOT_ALLOWED; 555 } 556 } 557 } 558 } 559 560 return err; 561 } 562 563 int Parcel::compareData(const Parcel& other) { 564 size_t size = dataSize(); 565 if (size != other.dataSize()) { 566 return size < other.dataSize() ? -1 : 1; 567 } 568 return memcmp(data(), other.data(), size); 569 } 570 571 bool Parcel::allowFds() const 572 { 573 return mAllowFds; 574 } 575 576 bool Parcel::pushAllowFds(bool allowFds) 577 { 578 const bool origValue = mAllowFds; 579 if (!allowFds) { 580 mAllowFds = false; 581 } 582 return origValue; 583 } 584 585 void Parcel::restoreAllowFds(bool lastValue) 586 { 587 mAllowFds = lastValue; 588 } 589 590 bool Parcel::hasFileDescriptors() const 591 { 592 if (!mFdsKnown) { 593 scanForFds(); 594 } 595 return mHasFds; 596 } 597 598 // Write RPC headers. (previously just the interface token) 599 status_t Parcel::writeInterfaceToken(const String16& interface) 600 { 601 writeInt32(IPCThreadState::self()->getStrictModePolicy() | 602 STRICT_MODE_PENALTY_GATHER); 603 // currently the interface identification token is just its name as a string 604 return writeString16(interface); 605 } 606 607 bool Parcel::checkInterface(IBinder* binder) const 608 { 609 return enforceInterface(binder->getInterfaceDescriptor()); 610 } 611 612 bool Parcel::enforceInterface(const String16& interface, 613 IPCThreadState* threadState) const 614 { 615 int32_t strictPolicy = readInt32(); 616 if (threadState == NULL) { 617 threadState = IPCThreadState::self(); 618 } 619 if ((threadState->getLastTransactionBinderFlags() & 620 IBinder::FLAG_ONEWAY) != 0) { 621 // For one-way calls, the callee is running entirely 622 // disconnected from the caller, so disable StrictMode entirely. 623 // Not only does disk/network usage not impact the caller, but 624 // there's no way to commuicate back any violations anyway. 625 threadState->setStrictModePolicy(0); 626 } else { 627 threadState->setStrictModePolicy(strictPolicy); 628 } 629 const String16 str(readString16()); 630 if (str == interface) { 631 return true; 632 } else { 633 ALOGW("**** enforceInterface() expected '%s' but read '%s'", 634 String8(interface).string(), String8(str).string()); 635 return false; 636 } 637 } 638 639 const binder_size_t* Parcel::objects() const 640 { 641 return mObjects; 642 } 643 644 size_t Parcel::objectsCount() const 645 { 646 return mObjectsSize; 647 } 648 649 status_t Parcel::errorCheck() const 650 { 651 return mError; 652 } 653 654 void Parcel::setError(status_t err) 655 { 656 mError = err; 657 } 658 659 status_t Parcel::finishWrite(size_t len) 660 { 661 if (len > INT32_MAX) { 662 // don't accept size_t values which may have come from an 663 // inadvertent conversion from a negative int. 664 return BAD_VALUE; 665 } 666 667 //printf("Finish write of %d\n", len); 668 mDataPos += len; 669 ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos); 670 if (mDataPos > mDataSize) { 671 mDataSize = mDataPos; 672 ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize); 673 } 674 //printf("New pos=%d, size=%d\n", mDataPos, mDataSize); 675 return NO_ERROR; 676 } 677 678 status_t Parcel::writeUnpadded(const void* data, size_t len) 679 { 680 if (len > INT32_MAX) { 681 // don't accept size_t values which may have come from an 682 // inadvertent conversion from a negative int. 683 return BAD_VALUE; 684 } 685 686 size_t end = mDataPos + len; 687 if (end < mDataPos) { 688 // integer overflow 689 return BAD_VALUE; 690 } 691 692 if (end <= mDataCapacity) { 693 restart_write: 694 memcpy(mData+mDataPos, data, len); 695 return finishWrite(len); 696 } 697 698 status_t err = growData(len); 699 if (err == NO_ERROR) goto restart_write; 700 return err; 701 } 702 703 status_t Parcel::write(const void* data, size_t len) 704 { 705 if (len > INT32_MAX) { 706 // don't accept size_t values which may have come from an 707 // inadvertent conversion from a negative int. 708 return BAD_VALUE; 709 } 710 711 void* const d = writeInplace(len); 712 if (d) { 713 memcpy(d, data, len); 714 return NO_ERROR; 715 } 716 return mError; 717 } 718 719 void* Parcel::writeInplace(size_t len) 720 { 721 if (len > INT32_MAX) { 722 // don't accept size_t values which may have come from an 723 // inadvertent conversion from a negative int. 724 return NULL; 725 } 726 727 const size_t padded = pad_size(len); 728 729 // sanity check for integer overflow 730 if (mDataPos+padded < mDataPos) { 731 return NULL; 732 } 733 734 if ((mDataPos+padded) <= mDataCapacity) { 735 restart_write: 736 //printf("Writing %ld bytes, padded to %ld\n", len, padded); 737 uint8_t* const data = mData+mDataPos; 738 739 // Need to pad at end? 740 if (padded != len) { 741 #if BYTE_ORDER == BIG_ENDIAN 742 static const uint32_t mask[4] = { 743 0x00000000, 0xffffff00, 0xffff0000, 0xff000000 744 }; 745 #endif 746 #if BYTE_ORDER == LITTLE_ENDIAN 747 static const uint32_t mask[4] = { 748 0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff 749 }; 750 #endif 751 //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len], 752 // *reinterpret_cast<void**>(data+padded-4)); 753 *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len]; 754 } 755 756 finishWrite(padded); 757 return data; 758 } 759 760 status_t err = growData(padded); 761 if (err == NO_ERROR) goto restart_write; 762 return NULL; 763 } 764 765 status_t Parcel::writeUtf8AsUtf16(const std::string& str) { 766 const uint8_t* strData = (uint8_t*)str.data(); 767 const size_t strLen= str.length(); 768 const ssize_t utf16Len = utf8_to_utf16_length(strData, strLen); 769 if (utf16Len < 0 || utf16Len > std::numeric_limits<int32_t>::max()) { 770 return BAD_VALUE; 771 } 772 773 status_t err = writeInt32(utf16Len); 774 if (err) { 775 return err; 776 } 777 778 // Allocate enough bytes to hold our converted string and its terminating NULL. 779 void* dst = writeInplace((utf16Len + 1) * sizeof(char16_t)); 780 if (!dst) { 781 return NO_MEMORY; 782 } 783 784 utf8_to_utf16(strData, strLen, (char16_t*)dst, (size_t) utf16Len + 1); 785 786 return NO_ERROR; 787 } 788 789 status_t Parcel::writeUtf8AsUtf16(const std::unique_ptr<std::string>& str) { 790 if (!str) { 791 return writeInt32(-1); 792 } 793 return writeUtf8AsUtf16(*str); 794 } 795 796 namespace { 797 798 template<typename T> 799 status_t writeByteVectorInternal(Parcel* parcel, const std::vector<T>& val) 800 { 801 status_t status; 802 if (val.size() > std::numeric_limits<int32_t>::max()) { 803 status = BAD_VALUE; 804 return status; 805 } 806 807 status = parcel->writeInt32(val.size()); 808 if (status != OK) { 809 return status; 810 } 811 812 void* data = parcel->writeInplace(val.size()); 813 if (!data) { 814 status = BAD_VALUE; 815 return status; 816 } 817 818 memcpy(data, val.data(), val.size()); 819 return status; 820 } 821 822 template<typename T> 823 status_t writeByteVectorInternalPtr(Parcel* parcel, 824 const std::unique_ptr<std::vector<T>>& val) 825 { 826 if (!val) { 827 return parcel->writeInt32(-1); 828 } 829 830 return writeByteVectorInternal(parcel, *val); 831 } 832 833 } // namespace 834 835 status_t Parcel::writeByteVector(const std::vector<int8_t>& val) { 836 return writeByteVectorInternal(this, val); 837 } 838 839 status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<int8_t>>& val) 840 { 841 return writeByteVectorInternalPtr(this, val); 842 } 843 844 status_t Parcel::writeByteVector(const std::vector<uint8_t>& val) { 845 return writeByteVectorInternal(this, val); 846 } 847 848 status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<uint8_t>>& val) 849 { 850 return writeByteVectorInternalPtr(this, val); 851 } 852 853 status_t Parcel::writeInt32Vector(const std::vector<int32_t>& val) 854 { 855 return writeTypedVector(val, &Parcel::writeInt32); 856 } 857 858 status_t Parcel::writeInt32Vector(const std::unique_ptr<std::vector<int32_t>>& val) 859 { 860 return writeNullableTypedVector(val, &Parcel::writeInt32); 861 } 862 863 status_t Parcel::writeInt64Vector(const std::vector<int64_t>& val) 864 { 865 return writeTypedVector(val, &Parcel::writeInt64); 866 } 867 868 status_t Parcel::writeInt64Vector(const std::unique_ptr<std::vector<int64_t>>& val) 869 { 870 return writeNullableTypedVector(val, &Parcel::writeInt64); 871 } 872 873 status_t Parcel::writeFloatVector(const std::vector<float>& val) 874 { 875 return writeTypedVector(val, &Parcel::writeFloat); 876 } 877 878 status_t Parcel::writeFloatVector(const std::unique_ptr<std::vector<float>>& val) 879 { 880 return writeNullableTypedVector(val, &Parcel::writeFloat); 881 } 882 883 status_t Parcel::writeDoubleVector(const std::vector<double>& val) 884 { 885 return writeTypedVector(val, &Parcel::writeDouble); 886 } 887 888 status_t Parcel::writeDoubleVector(const std::unique_ptr<std::vector<double>>& val) 889 { 890 return writeNullableTypedVector(val, &Parcel::writeDouble); 891 } 892 893 status_t Parcel::writeBoolVector(const std::vector<bool>& val) 894 { 895 return writeTypedVector(val, &Parcel::writeBool); 896 } 897 898 status_t Parcel::writeBoolVector(const std::unique_ptr<std::vector<bool>>& val) 899 { 900 return writeNullableTypedVector(val, &Parcel::writeBool); 901 } 902 903 status_t Parcel::writeCharVector(const std::vector<char16_t>& val) 904 { 905 return writeTypedVector(val, &Parcel::writeChar); 906 } 907 908 status_t Parcel::writeCharVector(const std::unique_ptr<std::vector<char16_t>>& val) 909 { 910 return writeNullableTypedVector(val, &Parcel::writeChar); 911 } 912 913 status_t Parcel::writeString16Vector(const std::vector<String16>& val) 914 { 915 return writeTypedVector(val, &Parcel::writeString16); 916 } 917 918 status_t Parcel::writeString16Vector( 919 const std::unique_ptr<std::vector<std::unique_ptr<String16>>>& val) 920 { 921 return writeNullableTypedVector(val, &Parcel::writeString16); 922 } 923 924 status_t Parcel::writeUtf8VectorAsUtf16Vector( 925 const std::unique_ptr<std::vector<std::unique_ptr<std::string>>>& val) { 926 return writeNullableTypedVector(val, &Parcel::writeUtf8AsUtf16); 927 } 928 929 status_t Parcel::writeUtf8VectorAsUtf16Vector(const std::vector<std::string>& val) { 930 return writeTypedVector(val, &Parcel::writeUtf8AsUtf16); 931 } 932 933 status_t Parcel::writeInt32(int32_t val) 934 { 935 return writeAligned(val); 936 } 937 938 status_t Parcel::writeUint32(uint32_t val) 939 { 940 return writeAligned(val); 941 } 942 943 status_t Parcel::writeInt32Array(size_t len, const int32_t *val) { 944 if (len > INT32_MAX) { 945 // don't accept size_t values which may have come from an 946 // inadvertent conversion from a negative int. 947 return BAD_VALUE; 948 } 949 950 if (!val) { 951 return writeInt32(-1); 952 } 953 status_t ret = writeInt32(static_cast<uint32_t>(len)); 954 if (ret == NO_ERROR) { 955 ret = write(val, len * sizeof(*val)); 956 } 957 return ret; 958 } 959 status_t Parcel::writeByteArray(size_t len, const uint8_t *val) { 960 if (len > INT32_MAX) { 961 // don't accept size_t values which may have come from an 962 // inadvertent conversion from a negative int. 963 return BAD_VALUE; 964 } 965 966 if (!val) { 967 return writeInt32(-1); 968 } 969 status_t ret = writeInt32(static_cast<uint32_t>(len)); 970 if (ret == NO_ERROR) { 971 ret = write(val, len * sizeof(*val)); 972 } 973 return ret; 974 } 975 976 status_t Parcel::writeBool(bool val) 977 { 978 return writeInt32(int32_t(val)); 979 } 980 981 status_t Parcel::writeChar(char16_t val) 982 { 983 return writeInt32(int32_t(val)); 984 } 985 986 status_t Parcel::writeByte(int8_t val) 987 { 988 return writeInt32(int32_t(val)); 989 } 990 991 status_t Parcel::writeInt64(int64_t val) 992 { 993 return writeAligned(val); 994 } 995 996 status_t Parcel::writeUint64(uint64_t val) 997 { 998 return writeAligned(val); 999 } 1000 1001 status_t Parcel::writePointer(uintptr_t val) 1002 { 1003 return writeAligned<binder_uintptr_t>(val); 1004 } 1005 1006 status_t Parcel::writeFloat(float val) 1007 { 1008 return writeAligned(val); 1009 } 1010 1011 #if defined(__mips__) && defined(__mips_hard_float) 1012 1013 status_t Parcel::writeDouble(double val) 1014 { 1015 union { 1016 double d; 1017 unsigned long long ll; 1018 } u; 1019 u.d = val; 1020 return writeAligned(u.ll); 1021 } 1022 1023 #else 1024 1025 status_t Parcel::writeDouble(double val) 1026 { 1027 return writeAligned(val); 1028 } 1029 1030 #endif 1031 1032 status_t Parcel::writeCString(const char* str) 1033 { 1034 return write(str, strlen(str)+1); 1035 } 1036 1037 status_t Parcel::writeString8(const String8& str) 1038 { 1039 status_t err = writeInt32(str.bytes()); 1040 // only write string if its length is more than zero characters, 1041 // as readString8 will only read if the length field is non-zero. 1042 // this is slightly different from how writeString16 works. 1043 if (str.bytes() > 0 && err == NO_ERROR) { 1044 err = write(str.string(), str.bytes()+1); 1045 } 1046 return err; 1047 } 1048 1049 status_t Parcel::writeString16(const std::unique_ptr<String16>& str) 1050 { 1051 if (!str) { 1052 return writeInt32(-1); 1053 } 1054 1055 return writeString16(*str); 1056 } 1057 1058 status_t Parcel::writeString16(const String16& str) 1059 { 1060 return writeString16(str.string(), str.size()); 1061 } 1062 1063 status_t Parcel::writeString16(const char16_t* str, size_t len) 1064 { 1065 if (str == NULL) return writeInt32(-1); 1066 1067 status_t err = writeInt32(len); 1068 if (err == NO_ERROR) { 1069 len *= sizeof(char16_t); 1070 uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t)); 1071 if (data) { 1072 memcpy(data, str, len); 1073 *reinterpret_cast<char16_t*>(data+len) = 0; 1074 return NO_ERROR; 1075 } 1076 err = mError; 1077 } 1078 return err; 1079 } 1080 1081 status_t Parcel::writeStrongBinder(const sp<IBinder>& val) 1082 { 1083 return flatten_binder(ProcessState::self(), val, this); 1084 } 1085 1086 status_t Parcel::writeStrongBinderVector(const std::vector<sp<IBinder>>& val) 1087 { 1088 return writeTypedVector(val, &Parcel::writeStrongBinder); 1089 } 1090 1091 status_t Parcel::writeStrongBinderVector(const std::unique_ptr<std::vector<sp<IBinder>>>& val) 1092 { 1093 return writeNullableTypedVector(val, &Parcel::writeStrongBinder); 1094 } 1095 1096 status_t Parcel::readStrongBinderVector(std::unique_ptr<std::vector<sp<IBinder>>>* val) const { 1097 return readNullableTypedVector(val, &Parcel::readNullableStrongBinder); 1098 } 1099 1100 status_t Parcel::readStrongBinderVector(std::vector<sp<IBinder>>* val) const { 1101 return readTypedVector(val, &Parcel::readStrongBinder); 1102 } 1103 1104 status_t Parcel::writeWeakBinder(const wp<IBinder>& val) 1105 { 1106 return flatten_binder(ProcessState::self(), val, this); 1107 } 1108 1109 status_t Parcel::writeRawNullableParcelable(const Parcelable* parcelable) { 1110 if (!parcelable) { 1111 return writeInt32(0); 1112 } 1113 1114 return writeParcelable(*parcelable); 1115 } 1116 1117 status_t Parcel::writeParcelable(const Parcelable& parcelable) { 1118 status_t status = writeInt32(1); // parcelable is not null. 1119 if (status != OK) { 1120 return status; 1121 } 1122 return parcelable.writeToParcel(this); 1123 } 1124 1125 status_t Parcel::writeValue(const binder::Value& value) { 1126 return value.writeToParcel(this); 1127 } 1128 1129 status_t Parcel::writeNativeHandle(const native_handle* handle) 1130 { 1131 if (!handle || handle->version != sizeof(native_handle)) 1132 return BAD_TYPE; 1133 1134 status_t err; 1135 err = writeInt32(handle->numFds); 1136 if (err != NO_ERROR) return err; 1137 1138 err = writeInt32(handle->numInts); 1139 if (err != NO_ERROR) return err; 1140 1141 for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++) 1142 err = writeDupFileDescriptor(handle->data[i]); 1143 1144 if (err != NO_ERROR) { 1145 ALOGD("write native handle, write dup fd failed"); 1146 return err; 1147 } 1148 err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts); 1149 return err; 1150 } 1151 1152 status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership) 1153 { 1154 flat_binder_object obj; 1155 obj.type = BINDER_TYPE_FD; 1156 obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS; 1157 obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */ 1158 obj.handle = fd; 1159 obj.cookie = takeOwnership ? 1 : 0; 1160 return writeObject(obj, true); 1161 } 1162 1163 status_t Parcel::writeDupFileDescriptor(int fd) 1164 { 1165 int dupFd = fcntl(fd, F_DUPFD_CLOEXEC, 0); 1166 if (dupFd < 0) { 1167 return -errno; 1168 } 1169 status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/); 1170 if (err != OK) { 1171 close(dupFd); 1172 } 1173 return err; 1174 } 1175 1176 status_t Parcel::writeParcelFileDescriptor(int fd, bool takeOwnership) 1177 { 1178 writeInt32(0); 1179 return writeFileDescriptor(fd, takeOwnership); 1180 } 1181 1182 status_t Parcel::writeUniqueFileDescriptor(const base::unique_fd& fd) { 1183 return writeDupFileDescriptor(fd.get()); 1184 } 1185 1186 status_t Parcel::writeUniqueFileDescriptorVector(const std::vector<base::unique_fd>& val) { 1187 return writeTypedVector(val, &Parcel::writeUniqueFileDescriptor); 1188 } 1189 1190 status_t Parcel::writeUniqueFileDescriptorVector(const std::unique_ptr<std::vector<base::unique_fd>>& val) { 1191 return writeNullableTypedVector(val, &Parcel::writeUniqueFileDescriptor); 1192 } 1193 1194 status_t Parcel::writeBlob(size_t len, bool mutableCopy, WritableBlob* outBlob) 1195 { 1196 if (len > INT32_MAX) { 1197 // don't accept size_t values which may have come from an 1198 // inadvertent conversion from a negative int. 1199 return BAD_VALUE; 1200 } 1201 1202 status_t status; 1203 if (!mAllowFds || len <= BLOB_INPLACE_LIMIT) { 1204 ALOGV("writeBlob: write in place"); 1205 status = writeInt32(BLOB_INPLACE); 1206 if (status) return status; 1207 1208 void* ptr = writeInplace(len); 1209 if (!ptr) return NO_MEMORY; 1210 1211 outBlob->init(-1, ptr, len, false); 1212 return NO_ERROR; 1213 } 1214 1215 ALOGV("writeBlob: write to ashmem"); 1216 int fd = ashmem_create_region("Parcel Blob", len); 1217 if (fd < 0) return NO_MEMORY; 1218 1219 int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE); 1220 if (result < 0) { 1221 status = result; 1222 } else { 1223 void* ptr = ::mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); 1224 if (ptr == MAP_FAILED) { 1225 status = -errno; 1226 } else { 1227 if (!mutableCopy) { 1228 result = ashmem_set_prot_region(fd, PROT_READ); 1229 } 1230 if (result < 0) { 1231 status = result; 1232 } else { 1233 status = writeInt32(mutableCopy ? BLOB_ASHMEM_MUTABLE : BLOB_ASHMEM_IMMUTABLE); 1234 if (!status) { 1235 status = writeFileDescriptor(fd, true /*takeOwnership*/); 1236 if (!status) { 1237 outBlob->init(fd, ptr, len, mutableCopy); 1238 return NO_ERROR; 1239 } 1240 } 1241 } 1242 } 1243 ::munmap(ptr, len); 1244 } 1245 ::close(fd); 1246 return status; 1247 } 1248 1249 status_t Parcel::writeDupImmutableBlobFileDescriptor(int fd) 1250 { 1251 // Must match up with what's done in writeBlob. 1252 if (!mAllowFds) return FDS_NOT_ALLOWED; 1253 status_t status = writeInt32(BLOB_ASHMEM_IMMUTABLE); 1254 if (status) return status; 1255 return writeDupFileDescriptor(fd); 1256 } 1257 1258 status_t Parcel::write(const FlattenableHelperInterface& val) 1259 { 1260 status_t err; 1261 1262 // size if needed 1263 const size_t len = val.getFlattenedSize(); 1264 const size_t fd_count = val.getFdCount(); 1265 1266 if ((len > INT32_MAX) || (fd_count >= gMaxFds)) { 1267 // don't accept size_t values which may have come from an 1268 // inadvertent conversion from a negative int. 1269 return BAD_VALUE; 1270 } 1271 1272 err = this->writeInt32(len); 1273 if (err) return err; 1274 1275 err = this->writeInt32(fd_count); 1276 if (err) return err; 1277 1278 // payload 1279 void* const buf = this->writeInplace(pad_size(len)); 1280 if (buf == NULL) 1281 return BAD_VALUE; 1282 1283 int* fds = NULL; 1284 if (fd_count) { 1285 fds = new (std::nothrow) int[fd_count]; 1286 if (fds == nullptr) { 1287 ALOGE("write: failed to allocate requested %zu fds", fd_count); 1288 return BAD_VALUE; 1289 } 1290 } 1291 1292 err = val.flatten(buf, len, fds, fd_count); 1293 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) { 1294 err = this->writeDupFileDescriptor( fds[i] ); 1295 } 1296 1297 if (fd_count) { 1298 delete [] fds; 1299 } 1300 1301 return err; 1302 } 1303 1304 status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData) 1305 { 1306 const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity; 1307 const bool enoughObjects = mObjectsSize < mObjectsCapacity; 1308 if (enoughData && enoughObjects) { 1309 restart_write: 1310 *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val; 1311 1312 // remember if it's a file descriptor 1313 if (val.type == BINDER_TYPE_FD) { 1314 if (!mAllowFds) { 1315 // fail before modifying our object index 1316 return FDS_NOT_ALLOWED; 1317 } 1318 mHasFds = mFdsKnown = true; 1319 } 1320 1321 // Need to write meta-data? 1322 if (nullMetaData || val.binder != 0) { 1323 mObjects[mObjectsSize] = mDataPos; 1324 acquire_object(ProcessState::self(), val, this, &mOpenAshmemSize); 1325 mObjectsSize++; 1326 } 1327 1328 return finishWrite(sizeof(flat_binder_object)); 1329 } 1330 1331 if (!enoughData) { 1332 const status_t err = growData(sizeof(val)); 1333 if (err != NO_ERROR) return err; 1334 } 1335 if (!enoughObjects) { 1336 size_t newSize = ((mObjectsSize+2)*3)/2; 1337 if (newSize*sizeof(binder_size_t) < mObjectsSize) return NO_MEMORY; // overflow 1338 binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t)); 1339 if (objects == NULL) return NO_MEMORY; 1340 mObjects = objects; 1341 mObjectsCapacity = newSize; 1342 } 1343 1344 goto restart_write; 1345 } 1346 1347 status_t Parcel::writeNoException() 1348 { 1349 binder::Status status; 1350 return status.writeToParcel(this); 1351 } 1352 1353 status_t Parcel::writeMap(const ::android::binder::Map& map_in) 1354 { 1355 using ::std::map; 1356 using ::android::binder::Value; 1357 using ::android::binder::Map; 1358 1359 Map::const_iterator iter; 1360 status_t ret; 1361 1362 ret = writeInt32(map_in.size()); 1363 1364 if (ret != NO_ERROR) { 1365 return ret; 1366 } 1367 1368 for (iter = map_in.begin(); iter != map_in.end(); ++iter) { 1369 ret = writeValue(Value(iter->first)); 1370 if (ret != NO_ERROR) { 1371 return ret; 1372 } 1373 1374 ret = writeValue(iter->second); 1375 if (ret != NO_ERROR) { 1376 return ret; 1377 } 1378 } 1379 1380 return ret; 1381 } 1382 1383 status_t Parcel::writeNullableMap(const std::unique_ptr<binder::Map>& map) 1384 { 1385 if (map == NULL) { 1386 return writeInt32(-1); 1387 } 1388 1389 return writeMap(*map.get()); 1390 } 1391 1392 status_t Parcel::readMap(::android::binder::Map* map_out)const 1393 { 1394 using ::std::map; 1395 using ::android::String16; 1396 using ::android::String8; 1397 using ::android::binder::Value; 1398 using ::android::binder::Map; 1399 1400 status_t ret = NO_ERROR; 1401 int32_t count; 1402 1403 ret = readInt32(&count); 1404 if (ret != NO_ERROR) { 1405 return ret; 1406 } 1407 1408 if (count < 0) { 1409 ALOGE("readMap: Unexpected count: %d", count); 1410 return (count == -1) 1411 ? UNEXPECTED_NULL 1412 : BAD_VALUE; 1413 } 1414 1415 map_out->clear(); 1416 1417 while (count--) { 1418 Map::key_type key; 1419 Value value; 1420 1421 ret = readValue(&value); 1422 if (ret != NO_ERROR) { 1423 return ret; 1424 } 1425 1426 if (!value.getString(&key)) { 1427 ALOGE("readMap: Key type not a string (parcelType = %d)", value.parcelType()); 1428 return BAD_VALUE; 1429 } 1430 1431 ret = readValue(&value); 1432 if (ret != NO_ERROR) { 1433 return ret; 1434 } 1435 1436 (*map_out)[key] = value; 1437 } 1438 1439 return ret; 1440 } 1441 1442 status_t Parcel::readNullableMap(std::unique_ptr<binder::Map>* map) const 1443 { 1444 const size_t start = dataPosition(); 1445 int32_t count; 1446 status_t status = readInt32(&count); 1447 map->reset(); 1448 1449 if (status != OK || count == -1) { 1450 return status; 1451 } 1452 1453 setDataPosition(start); 1454 map->reset(new binder::Map()); 1455 1456 status = readMap(map->get()); 1457 1458 if (status != OK) { 1459 map->reset(); 1460 } 1461 1462 return status; 1463 } 1464 1465 1466 1467 void Parcel::remove(size_t /*start*/, size_t /*amt*/) 1468 { 1469 LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!"); 1470 } 1471 1472 status_t Parcel::read(void* outData, size_t len) const 1473 { 1474 if (len > INT32_MAX) { 1475 // don't accept size_t values which may have come from an 1476 // inadvertent conversion from a negative int. 1477 return BAD_VALUE; 1478 } 1479 1480 if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize 1481 && len <= pad_size(len)) { 1482 memcpy(outData, mData+mDataPos, len); 1483 mDataPos += pad_size(len); 1484 ALOGV("read Setting data pos of %p to %zu", this, mDataPos); 1485 return NO_ERROR; 1486 } 1487 return NOT_ENOUGH_DATA; 1488 } 1489 1490 const void* Parcel::readInplace(size_t len) const 1491 { 1492 if (len > INT32_MAX) { 1493 // don't accept size_t values which may have come from an 1494 // inadvertent conversion from a negative int. 1495 return NULL; 1496 } 1497 1498 if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize 1499 && len <= pad_size(len)) { 1500 const void* data = mData+mDataPos; 1501 mDataPos += pad_size(len); 1502 ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos); 1503 return data; 1504 } 1505 return NULL; 1506 } 1507 1508 template<class T> 1509 status_t Parcel::readAligned(T *pArg) const { 1510 COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T)); 1511 1512 if ((mDataPos+sizeof(T)) <= mDataSize) { 1513 const void* data = mData+mDataPos; 1514 mDataPos += sizeof(T); 1515 *pArg = *reinterpret_cast<const T*>(data); 1516 return NO_ERROR; 1517 } else { 1518 return NOT_ENOUGH_DATA; 1519 } 1520 } 1521 1522 template<class T> 1523 T Parcel::readAligned() const { 1524 T result; 1525 if (readAligned(&result) != NO_ERROR) { 1526 result = 0; 1527 } 1528 1529 return result; 1530 } 1531 1532 template<class T> 1533 status_t Parcel::writeAligned(T val) { 1534 COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T)); 1535 1536 if ((mDataPos+sizeof(val)) <= mDataCapacity) { 1537 restart_write: 1538 *reinterpret_cast<T*>(mData+mDataPos) = val; 1539 return finishWrite(sizeof(val)); 1540 } 1541 1542 status_t err = growData(sizeof(val)); 1543 if (err == NO_ERROR) goto restart_write; 1544 return err; 1545 } 1546 1547 namespace { 1548 1549 template<typename T> 1550 status_t readByteVectorInternal(const Parcel* parcel, 1551 std::vector<T>* val) { 1552 val->clear(); 1553 1554 int32_t size; 1555 status_t status = parcel->readInt32(&size); 1556 1557 if (status != OK) { 1558 return status; 1559 } 1560 1561 if (size < 0) { 1562 status = UNEXPECTED_NULL; 1563 return status; 1564 } 1565 if (size_t(size) > parcel->dataAvail()) { 1566 status = BAD_VALUE; 1567 return status; 1568 } 1569 1570 T* data = const_cast<T*>(reinterpret_cast<const T*>(parcel->readInplace(size))); 1571 if (!data) { 1572 status = BAD_VALUE; 1573 return status; 1574 } 1575 val->reserve(size); 1576 val->insert(val->end(), data, data + size); 1577 1578 return status; 1579 } 1580 1581 template<typename T> 1582 status_t readByteVectorInternalPtr( 1583 const Parcel* parcel, 1584 std::unique_ptr<std::vector<T>>* val) { 1585 const int32_t start = parcel->dataPosition(); 1586 int32_t size; 1587 status_t status = parcel->readInt32(&size); 1588 val->reset(); 1589 1590 if (status != OK || size < 0) { 1591 return status; 1592 } 1593 1594 parcel->setDataPosition(start); 1595 val->reset(new (std::nothrow) std::vector<T>()); 1596 1597 status = readByteVectorInternal(parcel, val->get()); 1598 1599 if (status != OK) { 1600 val->reset(); 1601 } 1602 1603 return status; 1604 } 1605 1606 } // namespace 1607 1608 status_t Parcel::readByteVector(std::vector<int8_t>* val) const { 1609 return readByteVectorInternal(this, val); 1610 } 1611 1612 status_t Parcel::readByteVector(std::vector<uint8_t>* val) const { 1613 return readByteVectorInternal(this, val); 1614 } 1615 1616 status_t Parcel::readByteVector(std::unique_ptr<std::vector<int8_t>>* val) const { 1617 return readByteVectorInternalPtr(this, val); 1618 } 1619 1620 status_t Parcel::readByteVector(std::unique_ptr<std::vector<uint8_t>>* val) const { 1621 return readByteVectorInternalPtr(this, val); 1622 } 1623 1624 status_t Parcel::readInt32Vector(std::unique_ptr<std::vector<int32_t>>* val) const { 1625 return readNullableTypedVector(val, &Parcel::readInt32); 1626 } 1627 1628 status_t Parcel::readInt32Vector(std::vector<int32_t>* val) const { 1629 return readTypedVector(val, &Parcel::readInt32); 1630 } 1631 1632 status_t Parcel::readInt64Vector(std::unique_ptr<std::vector<int64_t>>* val) const { 1633 return readNullableTypedVector(val, &Parcel::readInt64); 1634 } 1635 1636 status_t Parcel::readInt64Vector(std::vector<int64_t>* val) const { 1637 return readTypedVector(val, &Parcel::readInt64); 1638 } 1639 1640 status_t Parcel::readFloatVector(std::unique_ptr<std::vector<float>>* val) const { 1641 return readNullableTypedVector(val, &Parcel::readFloat); 1642 } 1643 1644 status_t Parcel::readFloatVector(std::vector<float>* val) const { 1645 return readTypedVector(val, &Parcel::readFloat); 1646 } 1647 1648 status_t Parcel::readDoubleVector(std::unique_ptr<std::vector<double>>* val) const { 1649 return readNullableTypedVector(val, &Parcel::readDouble); 1650 } 1651 1652 status_t Parcel::readDoubleVector(std::vector<double>* val) const { 1653 return readTypedVector(val, &Parcel::readDouble); 1654 } 1655 1656 status_t Parcel::readBoolVector(std::unique_ptr<std::vector<bool>>* val) const { 1657 const int32_t start = dataPosition(); 1658 int32_t size; 1659 status_t status = readInt32(&size); 1660 val->reset(); 1661 1662 if (status != OK || size < 0) { 1663 return status; 1664 } 1665 1666 setDataPosition(start); 1667 val->reset(new (std::nothrow) std::vector<bool>()); 1668 1669 status = readBoolVector(val->get()); 1670 1671 if (status != OK) { 1672 val->reset(); 1673 } 1674 1675 return status; 1676 } 1677 1678 status_t Parcel::readBoolVector(std::vector<bool>* val) const { 1679 int32_t size; 1680 status_t status = readInt32(&size); 1681 1682 if (status != OK) { 1683 return status; 1684 } 1685 1686 if (size < 0) { 1687 return UNEXPECTED_NULL; 1688 } 1689 1690 val->resize(size); 1691 1692 /* C++ bool handling means a vector of bools isn't necessarily addressable 1693 * (we might use individual bits) 1694 */ 1695 bool data; 1696 for (int32_t i = 0; i < size; ++i) { 1697 status = readBool(&data); 1698 (*val)[i] = data; 1699 1700 if (status != OK) { 1701 return status; 1702 } 1703 } 1704 1705 return OK; 1706 } 1707 1708 status_t Parcel::readCharVector(std::unique_ptr<std::vector<char16_t>>* val) const { 1709 return readNullableTypedVector(val, &Parcel::readChar); 1710 } 1711 1712 status_t Parcel::readCharVector(std::vector<char16_t>* val) const { 1713 return readTypedVector(val, &Parcel::readChar); 1714 } 1715 1716 status_t Parcel::readString16Vector( 1717 std::unique_ptr<std::vector<std::unique_ptr<String16>>>* val) const { 1718 return readNullableTypedVector(val, &Parcel::readString16); 1719 } 1720 1721 status_t Parcel::readString16Vector(std::vector<String16>* val) const { 1722 return readTypedVector(val, &Parcel::readString16); 1723 } 1724 1725 status_t Parcel::readUtf8VectorFromUtf16Vector( 1726 std::unique_ptr<std::vector<std::unique_ptr<std::string>>>* val) const { 1727 return readNullableTypedVector(val, &Parcel::readUtf8FromUtf16); 1728 } 1729 1730 status_t Parcel::readUtf8VectorFromUtf16Vector(std::vector<std::string>* val) const { 1731 return readTypedVector(val, &Parcel::readUtf8FromUtf16); 1732 } 1733 1734 status_t Parcel::readInt32(int32_t *pArg) const 1735 { 1736 return readAligned(pArg); 1737 } 1738 1739 int32_t Parcel::readInt32() const 1740 { 1741 return readAligned<int32_t>(); 1742 } 1743 1744 status_t Parcel::readUint32(uint32_t *pArg) const 1745 { 1746 return readAligned(pArg); 1747 } 1748 1749 uint32_t Parcel::readUint32() const 1750 { 1751 return readAligned<uint32_t>(); 1752 } 1753 1754 status_t Parcel::readInt64(int64_t *pArg) const 1755 { 1756 return readAligned(pArg); 1757 } 1758 1759 1760 int64_t Parcel::readInt64() const 1761 { 1762 return readAligned<int64_t>(); 1763 } 1764 1765 status_t Parcel::readUint64(uint64_t *pArg) const 1766 { 1767 return readAligned(pArg); 1768 } 1769 1770 uint64_t Parcel::readUint64() const 1771 { 1772 return readAligned<uint64_t>(); 1773 } 1774 1775 status_t Parcel::readPointer(uintptr_t *pArg) const 1776 { 1777 status_t ret; 1778 binder_uintptr_t ptr; 1779 ret = readAligned(&ptr); 1780 if (!ret) 1781 *pArg = ptr; 1782 return ret; 1783 } 1784 1785 uintptr_t Parcel::readPointer() const 1786 { 1787 return readAligned<binder_uintptr_t>(); 1788 } 1789 1790 1791 status_t Parcel::readFloat(float *pArg) const 1792 { 1793 return readAligned(pArg); 1794 } 1795 1796 1797 float Parcel::readFloat() const 1798 { 1799 return readAligned<float>(); 1800 } 1801 1802 #if defined(__mips__) && defined(__mips_hard_float) 1803 1804 status_t Parcel::readDouble(double *pArg) const 1805 { 1806 union { 1807 double d; 1808 unsigned long long ll; 1809 } u; 1810 u.d = 0; 1811 status_t status; 1812 status = readAligned(&u.ll); 1813 *pArg = u.d; 1814 return status; 1815 } 1816 1817 double Parcel::readDouble() const 1818 { 1819 union { 1820 double d; 1821 unsigned long long ll; 1822 } u; 1823 u.ll = readAligned<unsigned long long>(); 1824 return u.d; 1825 } 1826 1827 #else 1828 1829 status_t Parcel::readDouble(double *pArg) const 1830 { 1831 return readAligned(pArg); 1832 } 1833 1834 double Parcel::readDouble() const 1835 { 1836 return readAligned<double>(); 1837 } 1838 1839 #endif 1840 1841 status_t Parcel::readIntPtr(intptr_t *pArg) const 1842 { 1843 return readAligned(pArg); 1844 } 1845 1846 1847 intptr_t Parcel::readIntPtr() const 1848 { 1849 return readAligned<intptr_t>(); 1850 } 1851 1852 status_t Parcel::readBool(bool *pArg) const 1853 { 1854 int32_t tmp; 1855 status_t ret = readInt32(&tmp); 1856 *pArg = (tmp != 0); 1857 return ret; 1858 } 1859 1860 bool Parcel::readBool() const 1861 { 1862 return readInt32() != 0; 1863 } 1864 1865 status_t Parcel::readChar(char16_t *pArg) const 1866 { 1867 int32_t tmp; 1868 status_t ret = readInt32(&tmp); 1869 *pArg = char16_t(tmp); 1870 return ret; 1871 } 1872 1873 char16_t Parcel::readChar() const 1874 { 1875 return char16_t(readInt32()); 1876 } 1877 1878 status_t Parcel::readByte(int8_t *pArg) const 1879 { 1880 int32_t tmp; 1881 status_t ret = readInt32(&tmp); 1882 *pArg = int8_t(tmp); 1883 return ret; 1884 } 1885 1886 int8_t Parcel::readByte() const 1887 { 1888 return int8_t(readInt32()); 1889 } 1890 1891 status_t Parcel::readUtf8FromUtf16(std::string* str) const { 1892 size_t utf16Size = 0; 1893 const char16_t* src = readString16Inplace(&utf16Size); 1894 if (!src) { 1895 return UNEXPECTED_NULL; 1896 } 1897 1898 // Save ourselves the trouble, we're done. 1899 if (utf16Size == 0u) { 1900 str->clear(); 1901 return NO_ERROR; 1902 } 1903 1904 // Allow for closing '\0' 1905 ssize_t utf8Size = utf16_to_utf8_length(src, utf16Size) + 1; 1906 if (utf8Size < 1) { 1907 return BAD_VALUE; 1908 } 1909 // Note that while it is probably safe to assume string::resize keeps a 1910 // spare byte around for the trailing null, we still pass the size including the trailing null 1911 str->resize(utf8Size); 1912 utf16_to_utf8(src, utf16Size, &((*str)[0]), utf8Size); 1913 str->resize(utf8Size - 1); 1914 return NO_ERROR; 1915 } 1916 1917 status_t Parcel::readUtf8FromUtf16(std::unique_ptr<std::string>* str) const { 1918 const int32_t start = dataPosition(); 1919 int32_t size; 1920 status_t status = readInt32(&size); 1921 str->reset(); 1922 1923 if (status != OK || size < 0) { 1924 return status; 1925 } 1926 1927 setDataPosition(start); 1928 str->reset(new (std::nothrow) std::string()); 1929 return readUtf8FromUtf16(str->get()); 1930 } 1931 1932 const char* Parcel::readCString() const 1933 { 1934 const size_t avail = mDataSize-mDataPos; 1935 if (avail > 0) { 1936 const char* str = reinterpret_cast<const char*>(mData+mDataPos); 1937 // is the string's trailing NUL within the parcel's valid bounds? 1938 const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail)); 1939 if (eos) { 1940 const size_t len = eos - str; 1941 mDataPos += pad_size(len+1); 1942 ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos); 1943 return str; 1944 } 1945 } 1946 return NULL; 1947 } 1948 1949 String8 Parcel::readString8() const 1950 { 1951 String8 retString; 1952 status_t status = readString8(&retString); 1953 if (status != OK) { 1954 // We don't care about errors here, so just return an empty string. 1955 return String8(); 1956 } 1957 return retString; 1958 } 1959 1960 status_t Parcel::readString8(String8* pArg) const 1961 { 1962 int32_t size; 1963 status_t status = readInt32(&size); 1964 if (status != OK) { 1965 return status; 1966 } 1967 // watch for potential int overflow from size+1 1968 if (size < 0 || size >= INT32_MAX) { 1969 return BAD_VALUE; 1970 } 1971 // |writeString8| writes nothing for empty string. 1972 if (size == 0) { 1973 *pArg = String8(); 1974 return OK; 1975 } 1976 const char* str = (const char*)readInplace(size + 1); 1977 if (str == NULL) { 1978 return BAD_VALUE; 1979 } 1980 pArg->setTo(str, size); 1981 return OK; 1982 } 1983 1984 String16 Parcel::readString16() const 1985 { 1986 size_t len; 1987 const char16_t* str = readString16Inplace(&len); 1988 if (str) return String16(str, len); 1989 ALOGE("Reading a NULL string not supported here."); 1990 return String16(); 1991 } 1992 1993 status_t Parcel::readString16(std::unique_ptr<String16>* pArg) const 1994 { 1995 const int32_t start = dataPosition(); 1996 int32_t size; 1997 status_t status = readInt32(&size); 1998 pArg->reset(); 1999 2000 if (status != OK || size < 0) { 2001 return status; 2002 } 2003 2004 setDataPosition(start); 2005 pArg->reset(new (std::nothrow) String16()); 2006 2007 status = readString16(pArg->get()); 2008 2009 if (status != OK) { 2010 pArg->reset(); 2011 } 2012 2013 return status; 2014 } 2015 2016 status_t Parcel::readString16(String16* pArg) const 2017 { 2018 size_t len; 2019 const char16_t* str = readString16Inplace(&len); 2020 if (str) { 2021 pArg->setTo(str, len); 2022 return 0; 2023 } else { 2024 *pArg = String16(); 2025 return UNEXPECTED_NULL; 2026 } 2027 } 2028 2029 const char16_t* Parcel::readString16Inplace(size_t* outLen) const 2030 { 2031 int32_t size = readInt32(); 2032 // watch for potential int overflow from size+1 2033 if (size >= 0 && size < INT32_MAX) { 2034 *outLen = size; 2035 const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t)); 2036 if (str != NULL) { 2037 return str; 2038 } 2039 } 2040 *outLen = 0; 2041 return NULL; 2042 } 2043 2044 status_t Parcel::readStrongBinder(sp<IBinder>* val) const 2045 { 2046 status_t status = readNullableStrongBinder(val); 2047 if (status == OK && !val->get()) { 2048 status = UNEXPECTED_NULL; 2049 } 2050 return status; 2051 } 2052 2053 status_t Parcel::readNullableStrongBinder(sp<IBinder>* val) const 2054 { 2055 return unflatten_binder(ProcessState::self(), *this, val); 2056 } 2057 2058 sp<IBinder> Parcel::readStrongBinder() const 2059 { 2060 sp<IBinder> val; 2061 // Note that a lot of code in Android reads binders by hand with this 2062 // method, and that code has historically been ok with getting nullptr 2063 // back (while ignoring error codes). 2064 readNullableStrongBinder(&val); 2065 return val; 2066 } 2067 2068 wp<IBinder> Parcel::readWeakBinder() const 2069 { 2070 wp<IBinder> val; 2071 unflatten_binder(ProcessState::self(), *this, &val); 2072 return val; 2073 } 2074 2075 status_t Parcel::readParcelable(Parcelable* parcelable) const { 2076 int32_t have_parcelable = 0; 2077 status_t status = readInt32(&have_parcelable); 2078 if (status != OK) { 2079 return status; 2080 } 2081 if (!have_parcelable) { 2082 return UNEXPECTED_NULL; 2083 } 2084 return parcelable->readFromParcel(this); 2085 } 2086 2087 status_t Parcel::readValue(binder::Value* value) const { 2088 return value->readFromParcel(this); 2089 } 2090 2091 int32_t Parcel::readExceptionCode() const 2092 { 2093 binder::Status status; 2094 status.readFromParcel(*this); 2095 return status.exceptionCode(); 2096 } 2097 2098 native_handle* Parcel::readNativeHandle() const 2099 { 2100 int numFds, numInts; 2101 status_t err; 2102 err = readInt32(&numFds); 2103 if (err != NO_ERROR) return 0; 2104 err = readInt32(&numInts); 2105 if (err != NO_ERROR) return 0; 2106 2107 native_handle* h = native_handle_create(numFds, numInts); 2108 if (!h) { 2109 return 0; 2110 } 2111 2112 for (int i=0 ; err==NO_ERROR && i<numFds ; i++) { 2113 h->data[i] = fcntl(readFileDescriptor(), F_DUPFD_CLOEXEC, 0); 2114 if (h->data[i] < 0) { 2115 for (int j = 0; j < i; j++) { 2116 close(h->data[j]); 2117 } 2118 native_handle_delete(h); 2119 return 0; 2120 } 2121 } 2122 err = read(h->data + numFds, sizeof(int)*numInts); 2123 if (err != NO_ERROR) { 2124 native_handle_close(h); 2125 native_handle_delete(h); 2126 h = 0; 2127 } 2128 return h; 2129 } 2130 2131 int Parcel::readFileDescriptor() const 2132 { 2133 const flat_binder_object* flat = readObject(true); 2134 2135 if (flat && flat->type == BINDER_TYPE_FD) { 2136 return flat->handle; 2137 } 2138 2139 return BAD_TYPE; 2140 } 2141 2142 int Parcel::readParcelFileDescriptor() const 2143 { 2144 int32_t hasComm = readInt32(); 2145 int fd = readFileDescriptor(); 2146 if (hasComm != 0) { 2147 // skip 2148 readFileDescriptor(); 2149 } 2150 return fd; 2151 } 2152 2153 status_t Parcel::readUniqueFileDescriptor(base::unique_fd* val) const 2154 { 2155 int got = readFileDescriptor(); 2156 2157 if (got == BAD_TYPE) { 2158 return BAD_TYPE; 2159 } 2160 2161 val->reset(fcntl(got, F_DUPFD_CLOEXEC, 0)); 2162 2163 if (val->get() < 0) { 2164 return BAD_VALUE; 2165 } 2166 2167 return OK; 2168 } 2169 2170 2171 status_t Parcel::readUniqueFileDescriptorVector(std::unique_ptr<std::vector<base::unique_fd>>* val) const { 2172 return readNullableTypedVector(val, &Parcel::readUniqueFileDescriptor); 2173 } 2174 2175 status_t Parcel::readUniqueFileDescriptorVector(std::vector<base::unique_fd>* val) const { 2176 return readTypedVector(val, &Parcel::readUniqueFileDescriptor); 2177 } 2178 2179 status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const 2180 { 2181 int32_t blobType; 2182 status_t status = readInt32(&blobType); 2183 if (status) return status; 2184 2185 if (blobType == BLOB_INPLACE) { 2186 ALOGV("readBlob: read in place"); 2187 const void* ptr = readInplace(len); 2188 if (!ptr) return BAD_VALUE; 2189 2190 outBlob->init(-1, const_cast<void*>(ptr), len, false); 2191 return NO_ERROR; 2192 } 2193 2194 ALOGV("readBlob: read from ashmem"); 2195 bool isMutable = (blobType == BLOB_ASHMEM_MUTABLE); 2196 int fd = readFileDescriptor(); 2197 if (fd == int(BAD_TYPE)) return BAD_VALUE; 2198 2199 void* ptr = ::mmap(NULL, len, isMutable ? PROT_READ | PROT_WRITE : PROT_READ, 2200 MAP_SHARED, fd, 0); 2201 if (ptr == MAP_FAILED) return NO_MEMORY; 2202 2203 outBlob->init(fd, ptr, len, isMutable); 2204 return NO_ERROR; 2205 } 2206 2207 status_t Parcel::read(FlattenableHelperInterface& val) const 2208 { 2209 // size 2210 const size_t len = this->readInt32(); 2211 const size_t fd_count = this->readInt32(); 2212 2213 if ((len > INT32_MAX) || (fd_count >= gMaxFds)) { 2214 // don't accept size_t values which may have come from an 2215 // inadvertent conversion from a negative int. 2216 return BAD_VALUE; 2217 } 2218 2219 // payload 2220 void const* const buf = this->readInplace(pad_size(len)); 2221 if (buf == NULL) 2222 return BAD_VALUE; 2223 2224 int* fds = NULL; 2225 if (fd_count) { 2226 fds = new (std::nothrow) int[fd_count]; 2227 if (fds == nullptr) { 2228 ALOGE("read: failed to allocate requested %zu fds", fd_count); 2229 return BAD_VALUE; 2230 } 2231 } 2232 2233 status_t err = NO_ERROR; 2234 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) { 2235 int fd = this->readFileDescriptor(); 2236 if (fd < 0 || ((fds[i] = fcntl(fd, F_DUPFD_CLOEXEC, 0)) < 0)) { 2237 err = BAD_VALUE; 2238 ALOGE("fcntl(F_DUPFD_CLOEXEC) failed in Parcel::read, i is %zu, fds[i] is %d, fd_count is %zu, error: %s", 2239 i, fds[i], fd_count, strerror(fd < 0 ? -fd : errno)); 2240 // Close all the file descriptors that were dup-ed. 2241 for (size_t j=0; j<i ;j++) { 2242 close(fds[j]); 2243 } 2244 } 2245 } 2246 2247 if (err == NO_ERROR) { 2248 err = val.unflatten(buf, len, fds, fd_count); 2249 } 2250 2251 if (fd_count) { 2252 delete [] fds; 2253 } 2254 2255 return err; 2256 } 2257 const flat_binder_object* Parcel::readObject(bool nullMetaData) const 2258 { 2259 const size_t DPOS = mDataPos; 2260 if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) { 2261 const flat_binder_object* obj 2262 = reinterpret_cast<const flat_binder_object*>(mData+DPOS); 2263 mDataPos = DPOS + sizeof(flat_binder_object); 2264 if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) { 2265 // When transferring a NULL object, we don't write it into 2266 // the object list, so we don't want to check for it when 2267 // reading. 2268 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos); 2269 return obj; 2270 } 2271 2272 // Ensure that this object is valid... 2273 binder_size_t* const OBJS = mObjects; 2274 const size_t N = mObjectsSize; 2275 size_t opos = mNextObjectHint; 2276 2277 if (N > 0) { 2278 ALOGV("Parcel %p looking for obj at %zu, hint=%zu", 2279 this, DPOS, opos); 2280 2281 // Start at the current hint position, looking for an object at 2282 // the current data position. 2283 if (opos < N) { 2284 while (opos < (N-1) && OBJS[opos] < DPOS) { 2285 opos++; 2286 } 2287 } else { 2288 opos = N-1; 2289 } 2290 if (OBJS[opos] == DPOS) { 2291 // Found it! 2292 ALOGV("Parcel %p found obj %zu at index %zu with forward search", 2293 this, DPOS, opos); 2294 mNextObjectHint = opos+1; 2295 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos); 2296 return obj; 2297 } 2298 2299 // Look backwards for it... 2300 while (opos > 0 && OBJS[opos] > DPOS) { 2301 opos--; 2302 } 2303 if (OBJS[opos] == DPOS) { 2304 // Found it! 2305 ALOGV("Parcel %p found obj %zu at index %zu with backward search", 2306 this, DPOS, opos); 2307 mNextObjectHint = opos+1; 2308 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos); 2309 return obj; 2310 } 2311 } 2312 ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list", 2313 this, DPOS); 2314 } 2315 return NULL; 2316 } 2317 2318 void Parcel::closeFileDescriptors() 2319 { 2320 size_t i = mObjectsSize; 2321 if (i > 0) { 2322 //ALOGI("Closing file descriptors for %zu objects...", i); 2323 } 2324 while (i > 0) { 2325 i--; 2326 const flat_binder_object* flat 2327 = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]); 2328 if (flat->type == BINDER_TYPE_FD) { 2329 //ALOGI("Closing fd: %ld", flat->handle); 2330 close(flat->handle); 2331 } 2332 } 2333 } 2334 2335 uintptr_t Parcel::ipcData() const 2336 { 2337 return reinterpret_cast<uintptr_t>(mData); 2338 } 2339 2340 size_t Parcel::ipcDataSize() const 2341 { 2342 return (mDataSize > mDataPos ? mDataSize : mDataPos); 2343 } 2344 2345 uintptr_t Parcel::ipcObjects() const 2346 { 2347 return reinterpret_cast<uintptr_t>(mObjects); 2348 } 2349 2350 size_t Parcel::ipcObjectsCount() const 2351 { 2352 return mObjectsSize; 2353 } 2354 2355 void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize, 2356 const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie) 2357 { 2358 binder_size_t minOffset = 0; 2359 freeDataNoInit(); 2360 mError = NO_ERROR; 2361 mData = const_cast<uint8_t*>(data); 2362 mDataSize = mDataCapacity = dataSize; 2363 //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid()); 2364 mDataPos = 0; 2365 ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos); 2366 mObjects = const_cast<binder_size_t*>(objects); 2367 mObjectsSize = mObjectsCapacity = objectsCount; 2368 mNextObjectHint = 0; 2369 mOwner = relFunc; 2370 mOwnerCookie = relCookie; 2371 for (size_t i = 0; i < mObjectsSize; i++) { 2372 binder_size_t offset = mObjects[i]; 2373 if (offset < minOffset) { 2374 ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n", 2375 __func__, (uint64_t)offset, (uint64_t)minOffset); 2376 mObjectsSize = 0; 2377 break; 2378 } 2379 minOffset = offset + sizeof(flat_binder_object); 2380 } 2381 scanForFds(); 2382 } 2383 2384 void Parcel::print(TextOutput& to, uint32_t /*flags*/) const 2385 { 2386 to << "Parcel("; 2387 2388 if (errorCheck() != NO_ERROR) { 2389 const status_t err = errorCheck(); 2390 to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\""; 2391 } else if (dataSize() > 0) { 2392 const uint8_t* DATA = data(); 2393 to << indent << HexDump(DATA, dataSize()) << dedent; 2394 const binder_size_t* OBJS = objects(); 2395 const size_t N = objectsCount(); 2396 for (size_t i=0; i<N; i++) { 2397 const flat_binder_object* flat 2398 = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]); 2399 to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": " 2400 << TypeCode(flat->type & 0x7f7f7f00) 2401 << " = " << flat->binder; 2402 } 2403 } else { 2404 to << "NULL"; 2405 } 2406 2407 to << ")"; 2408 } 2409 2410 void Parcel::releaseObjects() 2411 { 2412 const sp<ProcessState> proc(ProcessState::self()); 2413 size_t i = mObjectsSize; 2414 uint8_t* const data = mData; 2415 binder_size_t* const objects = mObjects; 2416 while (i > 0) { 2417 i--; 2418 const flat_binder_object* flat 2419 = reinterpret_cast<flat_binder_object*>(data+objects[i]); 2420 release_object(proc, *flat, this, &mOpenAshmemSize); 2421 } 2422 } 2423 2424 void Parcel::acquireObjects() 2425 { 2426 const sp<ProcessState> proc(ProcessState::self()); 2427 size_t i = mObjectsSize; 2428 uint8_t* const data = mData; 2429 binder_size_t* const objects = mObjects; 2430 while (i > 0) { 2431 i--; 2432 const flat_binder_object* flat 2433 = reinterpret_cast<flat_binder_object*>(data+objects[i]); 2434 acquire_object(proc, *flat, this, &mOpenAshmemSize); 2435 } 2436 } 2437 2438 void Parcel::freeData() 2439 { 2440 freeDataNoInit(); 2441 initState(); 2442 } 2443 2444 void Parcel::freeDataNoInit() 2445 { 2446 if (mOwner) { 2447 LOG_ALLOC("Parcel %p: freeing other owner data", this); 2448 //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid()); 2449 mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie); 2450 } else { 2451 LOG_ALLOC("Parcel %p: freeing allocated data", this); 2452 releaseObjects(); 2453 if (mData) { 2454 LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity); 2455 pthread_mutex_lock(&gParcelGlobalAllocSizeLock); 2456 if (mDataCapacity <= gParcelGlobalAllocSize) { 2457 gParcelGlobalAllocSize = gParcelGlobalAllocSize - mDataCapacity; 2458 } else { 2459 gParcelGlobalAllocSize = 0; 2460 } 2461 if (gParcelGlobalAllocCount > 0) { 2462 gParcelGlobalAllocCount--; 2463 } 2464 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); 2465 free(mData); 2466 } 2467 if (mObjects) free(mObjects); 2468 } 2469 } 2470 2471 status_t Parcel::growData(size_t len) 2472 { 2473 if (len > INT32_MAX) { 2474 // don't accept size_t values which may have come from an 2475 // inadvertent conversion from a negative int. 2476 return BAD_VALUE; 2477 } 2478 2479 size_t newSize = ((mDataSize+len)*3)/2; 2480 return (newSize <= mDataSize) 2481 ? (status_t) NO_MEMORY 2482 : continueWrite(newSize); 2483 } 2484 2485 status_t Parcel::restartWrite(size_t desired) 2486 { 2487 if (desired > INT32_MAX) { 2488 // don't accept size_t values which may have come from an 2489 // inadvertent conversion from a negative int. 2490 return BAD_VALUE; 2491 } 2492 2493 if (mOwner) { 2494 freeData(); 2495 return continueWrite(desired); 2496 } 2497 2498 uint8_t* data = (uint8_t*)realloc(mData, desired); 2499 if (!data && desired > mDataCapacity) { 2500 mError = NO_MEMORY; 2501 return NO_MEMORY; 2502 } 2503 2504 releaseObjects(); 2505 2506 if (data) { 2507 LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired); 2508 pthread_mutex_lock(&gParcelGlobalAllocSizeLock); 2509 gParcelGlobalAllocSize += desired; 2510 gParcelGlobalAllocSize -= mDataCapacity; 2511 if (!mData) { 2512 gParcelGlobalAllocCount++; 2513 } 2514 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); 2515 mData = data; 2516 mDataCapacity = desired; 2517 } 2518 2519 mDataSize = mDataPos = 0; 2520 ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize); 2521 ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos); 2522 2523 free(mObjects); 2524 mObjects = NULL; 2525 mObjectsSize = mObjectsCapacity = 0; 2526 mNextObjectHint = 0; 2527 mHasFds = false; 2528 mFdsKnown = true; 2529 mAllowFds = true; 2530 2531 return NO_ERROR; 2532 } 2533 2534 status_t Parcel::continueWrite(size_t desired) 2535 { 2536 if (desired > INT32_MAX) { 2537 // don't accept size_t values which may have come from an 2538 // inadvertent conversion from a negative int. 2539 return BAD_VALUE; 2540 } 2541 2542 // If shrinking, first adjust for any objects that appear 2543 // after the new data size. 2544 size_t objectsSize = mObjectsSize; 2545 if (desired < mDataSize) { 2546 if (desired == 0) { 2547 objectsSize = 0; 2548 } else { 2549 while (objectsSize > 0) { 2550 if (mObjects[objectsSize-1] < desired) 2551 break; 2552 objectsSize--; 2553 } 2554 } 2555 } 2556 2557 if (mOwner) { 2558 // If the size is going to zero, just release the owner's data. 2559 if (desired == 0) { 2560 freeData(); 2561 return NO_ERROR; 2562 } 2563 2564 // If there is a different owner, we need to take 2565 // posession. 2566 uint8_t* data = (uint8_t*)malloc(desired); 2567 if (!data) { 2568 mError = NO_MEMORY; 2569 return NO_MEMORY; 2570 } 2571 binder_size_t* objects = NULL; 2572 2573 if (objectsSize) { 2574 objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t)); 2575 if (!objects) { 2576 free(data); 2577 2578 mError = NO_MEMORY; 2579 return NO_MEMORY; 2580 } 2581 2582 // Little hack to only acquire references on objects 2583 // we will be keeping. 2584 size_t oldObjectsSize = mObjectsSize; 2585 mObjectsSize = objectsSize; 2586 acquireObjects(); 2587 mObjectsSize = oldObjectsSize; 2588 } 2589 2590 if (mData) { 2591 memcpy(data, mData, mDataSize < desired ? mDataSize : desired); 2592 } 2593 if (objects && mObjects) { 2594 memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t)); 2595 } 2596 //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid()); 2597 mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie); 2598 mOwner = NULL; 2599 2600 LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired); 2601 pthread_mutex_lock(&gParcelGlobalAllocSizeLock); 2602 gParcelGlobalAllocSize += desired; 2603 gParcelGlobalAllocCount++; 2604 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); 2605 2606 mData = data; 2607 mObjects = objects; 2608 mDataSize = (mDataSize < desired) ? mDataSize : desired; 2609 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize); 2610 mDataCapacity = desired; 2611 mObjectsSize = mObjectsCapacity = objectsSize; 2612 mNextObjectHint = 0; 2613 2614 } else if (mData) { 2615 if (objectsSize < mObjectsSize) { 2616 // Need to release refs on any objects we are dropping. 2617 const sp<ProcessState> proc(ProcessState::self()); 2618 for (size_t i=objectsSize; i<mObjectsSize; i++) { 2619 const flat_binder_object* flat 2620 = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]); 2621 if (flat->type == BINDER_TYPE_FD) { 2622 // will need to rescan because we may have lopped off the only FDs 2623 mFdsKnown = false; 2624 } 2625 release_object(proc, *flat, this, &mOpenAshmemSize); 2626 } 2627 binder_size_t* objects = 2628 (binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t)); 2629 if (objects) { 2630 mObjects = objects; 2631 } 2632 mObjectsSize = objectsSize; 2633 mNextObjectHint = 0; 2634 } 2635 2636 // We own the data, so we can just do a realloc(). 2637 if (desired > mDataCapacity) { 2638 uint8_t* data = (uint8_t*)realloc(mData, desired); 2639 if (data) { 2640 LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity, 2641 desired); 2642 pthread_mutex_lock(&gParcelGlobalAllocSizeLock); 2643 gParcelGlobalAllocSize += desired; 2644 gParcelGlobalAllocSize -= mDataCapacity; 2645 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); 2646 mData = data; 2647 mDataCapacity = desired; 2648 } else if (desired > mDataCapacity) { 2649 mError = NO_MEMORY; 2650 return NO_MEMORY; 2651 } 2652 } else { 2653 if (mDataSize > desired) { 2654 mDataSize = desired; 2655 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize); 2656 } 2657 if (mDataPos > desired) { 2658 mDataPos = desired; 2659 ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos); 2660 } 2661 } 2662 2663 } else { 2664 // This is the first data. Easy! 2665 uint8_t* data = (uint8_t*)malloc(desired); 2666 if (!data) { 2667 mError = NO_MEMORY; 2668 return NO_MEMORY; 2669 } 2670 2671 if(!(mDataCapacity == 0 && mObjects == NULL 2672 && mObjectsCapacity == 0)) { 2673 ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired); 2674 } 2675 2676 LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired); 2677 pthread_mutex_lock(&gParcelGlobalAllocSizeLock); 2678 gParcelGlobalAllocSize += desired; 2679 gParcelGlobalAllocCount++; 2680 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); 2681 2682 mData = data; 2683 mDataSize = mDataPos = 0; 2684 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize); 2685 ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos); 2686 mDataCapacity = desired; 2687 } 2688 2689 return NO_ERROR; 2690 } 2691 2692 void Parcel::initState() 2693 { 2694 LOG_ALLOC("Parcel %p: initState", this); 2695 mError = NO_ERROR; 2696 mData = 0; 2697 mDataSize = 0; 2698 mDataCapacity = 0; 2699 mDataPos = 0; 2700 ALOGV("initState Setting data size of %p to %zu", this, mDataSize); 2701 ALOGV("initState Setting data pos of %p to %zu", this, mDataPos); 2702 mObjects = NULL; 2703 mObjectsSize = 0; 2704 mObjectsCapacity = 0; 2705 mNextObjectHint = 0; 2706 mHasFds = false; 2707 mFdsKnown = true; 2708 mAllowFds = true; 2709 mOwner = NULL; 2710 mOpenAshmemSize = 0; 2711 2712 // racing multiple init leads only to multiple identical write 2713 if (gMaxFds == 0) { 2714 struct rlimit result; 2715 if (!getrlimit(RLIMIT_NOFILE, &result)) { 2716 gMaxFds = (size_t)result.rlim_cur; 2717 //ALOGI("parcel fd limit set to %zu", gMaxFds); 2718 } else { 2719 ALOGW("Unable to getrlimit: %s", strerror(errno)); 2720 gMaxFds = 1024; 2721 } 2722 } 2723 } 2724 2725 void Parcel::scanForFds() const 2726 { 2727 bool hasFds = false; 2728 for (size_t i=0; i<mObjectsSize; i++) { 2729 const flat_binder_object* flat 2730 = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]); 2731 if (flat->type == BINDER_TYPE_FD) { 2732 hasFds = true; 2733 break; 2734 } 2735 } 2736 mHasFds = hasFds; 2737 mFdsKnown = true; 2738 } 2739 2740 size_t Parcel::getBlobAshmemSize() const 2741 { 2742 // This used to return the size of all blobs that were written to ashmem, now we're returning 2743 // the ashmem currently referenced by this Parcel, which should be equivalent. 2744 // TODO: Remove method once ABI can be changed. 2745 return mOpenAshmemSize; 2746 } 2747 2748 size_t Parcel::getOpenAshmemSize() const 2749 { 2750 return mOpenAshmemSize; 2751 } 2752 2753 // --- Parcel::Blob --- 2754 2755 Parcel::Blob::Blob() : 2756 mFd(-1), mData(NULL), mSize(0), mMutable(false) { 2757 } 2758 2759 Parcel::Blob::~Blob() { 2760 release(); 2761 } 2762 2763 void Parcel::Blob::release() { 2764 if (mFd != -1 && mData) { 2765 ::munmap(mData, mSize); 2766 } 2767 clear(); 2768 } 2769 2770 void Parcel::Blob::init(int fd, void* data, size_t size, bool isMutable) { 2771 mFd = fd; 2772 mData = data; 2773 mSize = size; 2774 mMutable = isMutable; 2775 } 2776 2777 void Parcel::Blob::clear() { 2778 mFd = -1; 2779 mData = NULL; 2780 mSize = 0; 2781 mMutable = false; 2782 } 2783 2784 }; // namespace android 2785