/*
 * Copyright (C) 2005 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Parcel"
//#define LOG_NDEBUG 0

#include <binder/Parcel.h>

#include <binder/IPCThreadState.h>
#include <binder/Binder.h>
#include <binder/BpBinder.h>
#include <binder/ProcessState.h>
#include <binder/TextOutput.h>

#include <errno.h>
#include <utils/CallStack.h>
#include <utils/Debug.h>
#include <utils/Log.h>
#include <utils/String8.h>
#include <utils/String16.h>
#include <utils/misc.h>
#include <utils/Flattenable.h>
#include <cutils/ashmem.h>

#include <private/binder/binder_module.h>

#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/mman.h>

#ifndef INT32_MAX
#define INT32_MAX ((int32_t)(2147483647))
#endif

#define LOG_REFS(...)
//#define LOG_REFS(...) ALOG(LOG_DEBUG, "Parcel", __VA_ARGS__)

// ---------------------------------------------------------------------------

#define PAD_SIZE(s) (((s)+3)&~3)

// Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
#define STRICT_MODE_PENALTY_GATHER 0x100

// Note: must be kept in sync with android/os/Parcel.java's EX_HAS_REPLY_HEADER
#define EX_HAS_REPLY_HEADER -128

// Maximum size of a blob to transfer in-place.
static const size_t IN_PLACE_BLOB_LIMIT = 40 * 1024;

// XXX This can be made public if we want to provide
// support for typed data.
struct small_flat_data
{
    uint32_t type;
    uint32_t data;
};

namespace android {

void acquire_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who)
{
    switch (obj.type) {
        case BINDER_TYPE_BINDER:
            if (obj.binder) {
                LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie);
                reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who);
            }
            return;
        case BINDER_TYPE_WEAK_BINDER:
            if (obj.binder)
                reinterpret_cast<RefBase::weakref_type*>(obj.binder)->incWeak(who);
            return;
        case BINDER_TYPE_HANDLE: {
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != NULL) {
                LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
                b->incStrong(who);
            }
            return;
        }
        case BINDER_TYPE_WEAK_HANDLE: {
            const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
            if (b != NULL) b.get_refs()->incWeak(who);
            return;
        }
        case BINDER_TYPE_FD: {
            // intentionally blank -- nothing to do to acquire this, but we do
            // recognize it as a legitimate object type.
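            // The fd itself is not duplicated or retained here; whether it is
            // closed later is governed by obj.cookie and handled in
            // release_object() below.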
            return;
        }
    }

    ALOGD("Invalid object type 0x%08x", obj.type);
}

void release_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who)
{
    switch (obj.type) {
        case BINDER_TYPE_BINDER:
            if (obj.binder) {
                LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie);
                reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who);
            }
            return;
        case BINDER_TYPE_WEAK_BINDER:
            if (obj.binder)
                reinterpret_cast<RefBase::weakref_type*>(obj.binder)->decWeak(who);
            return;
        case BINDER_TYPE_HANDLE: {
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != NULL) {
                LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
                b->decStrong(who);
            }
            return;
        }
        case BINDER_TYPE_WEAK_HANDLE: {
            const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
            if (b != NULL) b.get_refs()->decWeak(who);
            return;
        }
        case BINDER_TYPE_FD: {
            if (obj.cookie != 0) close(obj.handle);
            return;
        }
    }

    ALOGE("Invalid object type 0x%08x", obj.type);
}

inline static status_t finish_flatten_binder(
    const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
{
    return out->writeObject(flat, false);
}

status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        IBinder *local = binder->localBinder();
        if (!local) {
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == NULL) {
                ALOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    } else {
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
    }

    return finish_flatten_binder(binder, obj, out);
}

status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const wp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        sp<IBinder> real = binder.promote();
        if (real != NULL) {
            IBinder *local = real->localBinder();
            if (!local) {
                BpBinder *proxy = real->remoteBinder();
                if (proxy == NULL) {
                    ALOGE("null proxy");
                }
                const int32_t handle = proxy ? proxy->handle() : 0;
                obj.type = BINDER_TYPE_WEAK_HANDLE;
                obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
                obj.handle = handle;
                obj.cookie = 0;
            } else {
                obj.type = BINDER_TYPE_WEAK_BINDER;
                obj.binder = reinterpret_cast<uintptr_t>(binder.get_refs());
                obj.cookie = reinterpret_cast<uintptr_t>(binder.unsafe_get());
            }
            return finish_flatten_binder(real, obj, out);
        }

        // XXX How to deal?  In order to flatten the given binder,
        // we need to probe it for information, which requires a primary
        // reference...  but we don't have one.
        //
        // The OpenBinder implementation uses a dynamic_cast<> here,
        // but we can't do that with the different reference counting
        // implementation we are using.
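        //
        // Fall back to flattening a null binder below, which the receiving
        // side will unflatten as NULL.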
        ALOGE("Unable to unflatten Binder weak reference!");
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
        return finish_flatten_binder(NULL, obj, out);

    } else {
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
        return finish_flatten_binder(NULL, obj, out);
    }
}

inline static status_t finish_unflatten_binder(
    BpBinder* /*proxy*/, const flat_binder_object& /*flat*/,
    const Parcel& /*in*/)
{
    return NO_ERROR;
}

status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, sp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);

    if (flat) {
        switch (flat->type) {
            case BINDER_TYPE_BINDER:
                *out = reinterpret_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_HANDLE:
                *out = proc->getStrongProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->get()), *flat, in);
        }
    }
    return BAD_TYPE;
}

status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, wp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);

    if (flat) {
        switch (flat->type) {
            case BINDER_TYPE_BINDER:
                *out = reinterpret_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_WEAK_BINDER:
                if (flat->binder != 0) {
                    out->set_object_and_refs(
                        reinterpret_cast<IBinder*>(flat->cookie),
                        reinterpret_cast<RefBase::weakref_type*>(flat->binder));
                } else {
                    *out = NULL;
                }
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_HANDLE:
            case BINDER_TYPE_WEAK_HANDLE:
                *out = proc->getWeakProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->unsafe_get()), *flat, in);
        }
    }
    return BAD_TYPE;
}

// ---------------------------------------------------------------------------

Parcel::Parcel()
{
    initState();
}

Parcel::~Parcel()
{
    freeDataNoInit();
}

const uint8_t* Parcel::data() const
{
    return mData;
}

size_t Parcel::dataSize() const
{
    return (mDataSize > mDataPos ? mDataSize : mDataPos);
}

size_t Parcel::dataAvail() const
{
    // TODO: decide what to do about the possibility that this can
    // report an available-data size that exceeds a Java int's max
    // positive value, causing havoc.  Fortunately this will only
    // happen if someone constructs a Parcel containing more than two
    // gigabytes of data, which on typical phone hardware is simply
    // not possible.
    return dataSize() - dataPosition();
}
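
// Illustrative usage (hypothetical caller, not part of this file): since
// dataAvail() is just dataSize() - dataPosition(), a reader can drain a
// Parcel with a loop such as:
//
//     while (p.dataAvail() >= sizeof(int32_t)) {
//         int32_t v = p.readInt32();
//         // ... consume v ...
//     }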

size_t Parcel::dataPosition() const
{
    return mDataPos;
}

size_t Parcel::dataCapacity() const
{
    return mDataCapacity;
}

status_t Parcel::setDataSize(size_t size)
{
    status_t err;
    err = continueWrite(size);
    if (err == NO_ERROR) {
        mDataSize = size;
        ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize);
    }
    return err;
}

void Parcel::setDataPosition(size_t pos) const
{
    mDataPos = pos;
    mNextObjectHint = 0;
}

status_t Parcel::setDataCapacity(size_t size)
{
    if (size > mDataCapacity) return continueWrite(size);
    return NO_ERROR;
}

status_t Parcel::setData(const uint8_t* buffer, size_t len)
{
    status_t err = restartWrite(len);
    if (err == NO_ERROR) {
        memcpy(const_cast<uint8_t*>(data()), buffer, len);
        mDataSize = len;
        mFdsKnown = false;
    }
    return err;
}

status_t Parcel::appendFrom(const Parcel *parcel, size_t offset, size_t len)
{
    const sp<ProcessState> proc(ProcessState::self());
    status_t err;
    const uint8_t *data = parcel->mData;
    const binder_size_t *objects = parcel->mObjects;
    size_t size = parcel->mObjectsSize;
    int startPos = mDataPos;
    int firstIndex = -1, lastIndex = -2;

    if (len == 0) {
        return NO_ERROR;
    }

    // range checks against the source parcel size
    if ((offset > parcel->mDataSize)
            || (len > parcel->mDataSize)
            || (offset + len > parcel->mDataSize)) {
        return BAD_VALUE;
    }

    // Count objects in range
    for (int i = 0; i < (int) size; i++) {
        size_t off = objects[i];
        if ((off >= offset) && (off < offset + len)) {
            if (firstIndex == -1) {
                firstIndex = i;
            }
            lastIndex = i;
        }
    }
    int numObjects = lastIndex - firstIndex + 1;

    if ((mDataSize+len) > mDataCapacity) {
        // grow data
        err = growData(len);
        if (err != NO_ERROR) {
            return err;
        }
    }

    // append data
    memcpy(mData + mDataPos, data + offset, len);
    mDataPos += len;
    mDataSize += len;

    err = NO_ERROR;

    if (numObjects > 0) {
        // grow objects
        if (mObjectsCapacity < mObjectsSize + numObjects) {
            int newSize = ((mObjectsSize + numObjects)*3)/2;
            binder_size_t *objects =
                (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
            if (objects == (binder_size_t*)0) {
                return NO_MEMORY;
            }
            mObjects = objects;
            mObjectsCapacity = newSize;
        }

        // append and acquire objects
        int idx = mObjectsSize;
        for (int i = firstIndex; i <= lastIndex; i++) {
            size_t off = objects[i] - offset + startPos;
            mObjects[idx++] = off;
            mObjectsSize++;

            flat_binder_object* flat
                = reinterpret_cast<flat_binder_object*>(mData + off);
            acquire_object(proc, *flat, this);

            if (flat->type == BINDER_TYPE_FD) {
                // If this is a file descriptor, we need to dup it so the
                // new Parcel now owns its own fd, and can declare that we
                // officially know we have fds.
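                // The duplicated fd is owned by this Parcel (cookie is set to
                // 1 below), so release_object() will close it when the Parcel
                // releases its objects.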
                flat->handle = dup(flat->handle);
                flat->cookie = 1;
                mHasFds = mFdsKnown = true;
                if (!mAllowFds) {
                    err = FDS_NOT_ALLOWED;
                }
            }
        }
    }

    return err;
}

bool Parcel::pushAllowFds(bool allowFds)
{
    const bool origValue = mAllowFds;
    if (!allowFds) {
        mAllowFds = false;
    }
    return origValue;
}

void Parcel::restoreAllowFds(bool lastValue)
{
    mAllowFds = lastValue;
}

bool Parcel::hasFileDescriptors() const
{
    if (!mFdsKnown) {
        scanForFds();
    }
    return mHasFds;
}

// Write RPC headers.  (previously just the interface token)
status_t Parcel::writeInterfaceToken(const String16& interface)
{
    writeInt32(IPCThreadState::self()->getStrictModePolicy() |
               STRICT_MODE_PENALTY_GATHER);
    // currently the interface identification token is just its name as a string
    return writeString16(interface);
}

bool Parcel::checkInterface(IBinder* binder) const
{
    return enforceInterface(binder->getInterfaceDescriptor());
}

bool Parcel::enforceInterface(const String16& interface,
                              IPCThreadState* threadState) const
{
    int32_t strictPolicy = readInt32();
    if (threadState == NULL) {
        threadState = IPCThreadState::self();
    }
    if ((threadState->getLastTransactionBinderFlags() &
         IBinder::FLAG_ONEWAY) != 0) {
        // For one-way calls, the callee is running entirely
        // disconnected from the caller, so disable StrictMode entirely.
        // Not only does disk/network usage not impact the caller, but
        // there's no way to communicate back any violations anyway.
        threadState->setStrictModePolicy(0);
    } else {
        threadState->setStrictModePolicy(strictPolicy);
    }
    const String16 str(readString16());
    if (str == interface) {
        return true;
    } else {
        ALOGW("**** enforceInterface() expected '%s' but read '%s'",
              String8(interface).string(), String8(str).string());
        return false;
    }
}

const binder_size_t* Parcel::objects() const
{
    return mObjects;
}

size_t Parcel::objectsCount() const
{
    return mObjectsSize;
}

status_t Parcel::errorCheck() const
{
    return mError;
}

void Parcel::setError(status_t err)
{
    mError = err;
}

status_t Parcel::finishWrite(size_t len)
{
    //printf("Finish write of %d\n", len);
    mDataPos += len;
    ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
    if (mDataPos > mDataSize) {
        mDataSize = mDataPos;
        ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
    }
    //printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
    return NO_ERROR;
}

status_t Parcel::writeUnpadded(const void* data, size_t len)
{
    size_t end = mDataPos + len;
    if (end < mDataPos) {
        // integer overflow
        return BAD_VALUE;
    }

    if (end <= mDataCapacity) {
restart_write:
        memcpy(mData+mDataPos, data, len);
        return finishWrite(len);
    }

    status_t err = growData(len);
    if (err == NO_ERROR) goto restart_write;
    return err;
}

status_t Parcel::write(const void* data, size_t len)
{
    void* const d = writeInplace(len);
    if (d) {
        memcpy(d, data, len);
        return NO_ERROR;
    }
    return mError;
}
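
// Note on padding: writeInplace() below rounds the requested length up to a
// 4-byte boundary (PAD_SIZE), so e.g. a 5-byte payload advances the write
// position by 8 bytes, with the 3 trailing pad bytes masked to zero.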

void* Parcel::writeInplace(size_t len)
{
    const size_t padded = PAD_SIZE(len);

    // sanity check for integer overflow
    if (mDataPos+padded < mDataPos) {
        return NULL;
    }

    if ((mDataPos+padded) <= mDataCapacity) {
restart_write:
        //printf("Writing %ld bytes, padded to %ld\n", len, padded);
        uint8_t* const data = mData+mDataPos;

        // Need to pad at end?
        if (padded != len) {
#if BYTE_ORDER == BIG_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0xffffff00, 0xffff0000, 0xff000000
            };
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
            };
#endif
            //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len],
            //    *reinterpret_cast<void**>(data+padded-4));
            *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
        }

        finishWrite(padded);
        return data;
    }

    status_t err = growData(padded);
    if (err == NO_ERROR) goto restart_write;
    return NULL;
}

status_t Parcel::writeInt32(int32_t val)
{
    return writeAligned(val);
}
status_t Parcel::writeInt32Array(size_t len, const int32_t *val) {
    if (!val) {
        return writeAligned(-1);
    }
    status_t ret = writeAligned(len);
    if (ret == NO_ERROR) {
        ret = write(val, len * sizeof(*val));
    }
    return ret;
}
status_t Parcel::writeByteArray(size_t len, const uint8_t *val) {
    if (!val) {
        return writeAligned(-1);
    }
    status_t ret = writeAligned(len);
    if (ret == NO_ERROR) {
        ret = write(val, len * sizeof(*val));
    }
    return ret;
}

status_t Parcel::writeInt64(int64_t val)
{
    return writeAligned(val);
}

status_t Parcel::writePointer(uintptr_t val)
{
    return writeAligned<binder_uintptr_t>(val);
}

status_t Parcel::writeFloat(float val)
{
    return writeAligned(val);
}

#if defined(__mips__) && defined(__mips_hard_float)

status_t Parcel::writeDouble(double val)
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.d = val;
    return writeAligned(u.ll);
}

#else

status_t Parcel::writeDouble(double val)
{
    return writeAligned(val);
}

#endif

status_t Parcel::writeCString(const char* str)
{
    return write(str, strlen(str)+1);
}

status_t Parcel::writeString8(const String8& str)
{
    status_t err = writeInt32(str.bytes());
    // only write string if its length is more than zero characters,
    // as readString8 will only read if the length field is non-zero.
    // this is slightly different from how writeString16 works.
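    // Resulting wire format: [int32 byte count][string bytes + NUL], with
    // write() padding the payload out to a 4-byte boundary.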
    if (str.bytes() > 0 && err == NO_ERROR) {
        err = write(str.string(), str.bytes()+1);
    }
    return err;
}

status_t Parcel::writeString16(const String16& str)
{
    return writeString16(str.string(), str.size());
}

status_t Parcel::writeString16(const char16_t* str, size_t len)
{
    if (str == NULL) return writeInt32(-1);

    status_t err = writeInt32(len);
    if (err == NO_ERROR) {
        len *= sizeof(char16_t);
        uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
        if (data) {
            memcpy(data, str, len);
            *reinterpret_cast<char16_t*>(data+len) = 0;
            return NO_ERROR;
        }
        err = mError;
    }
    return err;
}

status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}

status_t Parcel::writeWeakBinder(const wp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}

status_t Parcel::writeNativeHandle(const native_handle* handle)
{
    if (!handle || handle->version != sizeof(native_handle))
        return BAD_TYPE;

    status_t err;
    err = writeInt32(handle->numFds);
    if (err != NO_ERROR) return err;

    err = writeInt32(handle->numInts);
    if (err != NO_ERROR) return err;

    for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++)
        err = writeDupFileDescriptor(handle->data[i]);

    if (err != NO_ERROR) {
        ALOGD("write native handle, write dup fd failed");
        return err;
    }
    err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts);
    return err;
}

status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership)
{
    flat_binder_object obj;
    obj.type = BINDER_TYPE_FD;
    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
    obj.handle = fd;
    obj.cookie = takeOwnership ? 1 : 0;
    return writeObject(obj, true);
}

status_t Parcel::writeDupFileDescriptor(int fd)
{
    int dupFd = dup(fd);

    {   // Temporary extra debug validation for b/17477219: a Parcel recipient is
        // getting a positive but invalid fd unexpectedly.  Trying to track down
        // where it's coming from.
        int dupErrno = dupFd < 0 ? errno : 0;
        int fdFlags = fcntl(fd, F_GETFD);
        int fdFlagsErrno = fdFlags == -1 ? errno : 0;
        int dupFlags = fcntl(dupFd, F_GETFD);
        int dupFlagsErrno = dupFlags == -1 ? errno : 0;
        if (dupFd < 0 || fdFlags == -1 || dupFlags == -1) {
            ALOGE("Parcel::writeDupFileDescriptor failed:\n"
                  "  fd=%d flags=%d err=%d(%s)\n"
                  "  dupFd=%d dupErr=%d(%s) flags=%d err=%d(%s)",
                  fd, fdFlags, fdFlagsErrno, strerror(fdFlagsErrno),
                  dupFd, dupErrno, strerror(dupErrno),
                  dupFlags, dupFlagsErrno, strerror(dupFlagsErrno));
            if (fd < 0 || fdFlags == -1) {
                CallStack(LOG_TAG);
            }
            return -errno;
        }
    }

    if (dupFd < 0) {
        return -errno;
    }
    status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/);
    if (err) {
        close(dupFd);
    }
    return err;
}

// WARNING: This method must stay in sync with
// Parcelable.Creator<ParcelFileDescriptor> CREATOR
// in frameworks/base/core/java/android/os/ParcelFileDescriptor.java
status_t Parcel::writeParcelFileDescriptor(int fd, int commChannel) {
    status_t status;

    if (fd < 0) {
        status = writeInt32(0); // ParcelFileDescriptor is null
        if (status) return status;
    } else {
        status = writeInt32(1); // ParcelFileDescriptor is not null
        if (status) return status;
        status = writeDupFileDescriptor(fd);
        if (status) return status;
        if (commChannel < 0) {
            status = writeInt32(0); // commChannel is null
            if (status) return status;
        } else {
            status = writeInt32(1); // commChannel is not null
            if (status) return status;
            status = writeDupFileDescriptor(commChannel);
        }
    }
    return status;
}

status_t Parcel::writeBlob(size_t len, WritableBlob* outBlob)
{
    status_t status;

    if (!mAllowFds || len <= IN_PLACE_BLOB_LIMIT) {
        ALOGV("writeBlob: write in place");
        status = writeInt32(0);
        if (status) return status;

        void* ptr = writeInplace(len);
        if (!ptr) return NO_MEMORY;

        outBlob->init(false /*mapped*/, ptr, len);
        return NO_ERROR;
    }

    ALOGV("writeBlob: write to ashmem");
    int fd = ashmem_create_region("Parcel Blob", len);
    if (fd < 0) return NO_MEMORY;

    int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
    if (result < 0) {
        status = result;
    } else {
        void* ptr = ::mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (ptr == MAP_FAILED) {
            status = -errno;
        } else {
            result = ashmem_set_prot_region(fd, PROT_READ);
            if (result < 0) {
                status = result;
            } else {
                status = writeInt32(1);
                if (!status) {
                    status = writeFileDescriptor(fd, true /*takeOwnership*/);
                    if (!status) {
                        outBlob->init(true /*mapped*/, ptr, len);
                        return NO_ERROR;
                    }
                }
            }
        }
        ::munmap(ptr, len);
    }
    ::close(fd);
    return status;
}

status_t Parcel::write(const FlattenableHelperInterface& val)
{
    status_t err;

    // size if needed
    const size_t len = val.getFlattenedSize();
    const size_t fd_count = val.getFdCount();

    err = this->writeInt32(len);
    if (err) return err;

    err = this->writeInt32(fd_count);
    if (err) return err;

    // payload
    void* const buf = this->writeInplace(PAD_SIZE(len));
    if (buf == NULL)
        return BAD_VALUE;

    int* fds = NULL;
    if (fd_count) {
        fds = new int[fd_count];
    }

    err = val.flatten(buf, len, fds, fd_count);
    for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
        err = this->writeDupFileDescriptor( fds[i] );
    }

    if (fd_count) {
        delete [] fds;
    }

    return err;
}
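
// Illustrative layout produced by Parcel::write(const FlattenableHelperInterface&)
// above: [int32 flattened size][int32 fd count][payload padded to 4 bytes],
// followed by one dup'd file descriptor object per fd written with
// writeDupFileDescriptor(). Parcel::read(FlattenableHelperInterface&) below
// consumes the same layout.
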
status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
{
    const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
    const bool enoughObjects = mObjectsSize < mObjectsCapacity;
    if (enoughData && enoughObjects) {
restart_write:
        *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;

        // Need to write meta-data?
        if (nullMetaData || val.binder != 0) {
            mObjects[mObjectsSize] = mDataPos;
            acquire_object(ProcessState::self(), val, this);
            mObjectsSize++;
        }

        // remember if it's a file descriptor
        if (val.type == BINDER_TYPE_FD) {
            if (!mAllowFds) {
                return FDS_NOT_ALLOWED;
            }
            mHasFds = mFdsKnown = true;
        }

        return finishWrite(sizeof(flat_binder_object));
    }

    if (!enoughData) {
        const status_t err = growData(sizeof(val));
        if (err != NO_ERROR) return err;
    }
    if (!enoughObjects) {
        size_t newSize = ((mObjectsSize+2)*3)/2;
        binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
        if (objects == NULL) return NO_MEMORY;
        mObjects = objects;
        mObjectsCapacity = newSize;
    }

    goto restart_write;
}

status_t Parcel::writeNoException()
{
    return writeInt32(0);
}

void Parcel::remove(size_t /*start*/, size_t /*amt*/)
{
    LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!");
}

status_t Parcel::read(void* outData, size_t len) const
{
    if ((mDataPos+PAD_SIZE(len)) >= mDataPos && (mDataPos+PAD_SIZE(len)) <= mDataSize
            && len <= PAD_SIZE(len)) {
        memcpy(outData, mData+mDataPos, len);
        mDataPos += PAD_SIZE(len);
        ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
        return NO_ERROR;
    }
    return NOT_ENOUGH_DATA;
}

const void* Parcel::readInplace(size_t len) const
{
    if ((mDataPos+PAD_SIZE(len)) >= mDataPos && (mDataPos+PAD_SIZE(len)) <= mDataSize
            && len <= PAD_SIZE(len)) {
        const void* data = mData+mDataPos;
        mDataPos += PAD_SIZE(len);
        ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
        return data;
    }
    return NULL;
}

template<class T>
status_t Parcel::readAligned(T *pArg) const {
    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE(sizeof(T)) == sizeof(T));

    if ((mDataPos+sizeof(T)) <= mDataSize) {
        const void* data = mData+mDataPos;
        mDataPos += sizeof(T);
        *pArg =  *reinterpret_cast<const T*>(data);
        return NO_ERROR;
    } else {
        return NOT_ENOUGH_DATA;
    }
}

template<class T>
T Parcel::readAligned() const {
    T result;
    if (readAligned(&result) != NO_ERROR) {
        result = 0;
    }

    return result;
}

template<class T>
status_t Parcel::writeAligned(T val) {
    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE(sizeof(T)) == sizeof(T));

    if ((mDataPos+sizeof(val)) <= mDataCapacity) {
restart_write:
        *reinterpret_cast<T*>(mData+mDataPos) = val;
        return finishWrite(sizeof(val));
    }

    status_t err = growData(sizeof(val));
    if (err == NO_ERROR) goto restart_write;
    return err;
}

status_t Parcel::readInt32(int32_t *pArg) const
{
    return readAligned(pArg);
}

int32_t Parcel::readInt32() const
{
    return readAligned<int32_t>();
}


status_t Parcel::readInt64(int64_t *pArg) const
{
    return readAligned(pArg);
}


int64_t Parcel::readInt64() const
{
    return readAligned<int64_t>();
}

status_t Parcel::readPointer(uintptr_t *pArg) const
{
    status_t ret;
    binder_uintptr_t ptr;
    ret = readAligned(&ptr);
    if (!ret)
        *pArg = ptr;
    return ret;
}

uintptr_t Parcel::readPointer() const
{
    return readAligned<binder_uintptr_t>();
}


status_t Parcel::readFloat(float *pArg) const
{
    return readAligned(pArg);
}


float Parcel::readFloat() const
{
    return readAligned<float>();
}

#if defined(__mips__) && defined(__mips_hard_float)

status_t Parcel::readDouble(double *pArg) const
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.d = 0;
    status_t status;
    status = readAligned(&u.ll);
    *pArg = u.d;
    return status;
}

double Parcel::readDouble() const
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.ll = readAligned<unsigned long long>();
    return u.d;
}

#else

status_t Parcel::readDouble(double *pArg) const
{
    return readAligned(pArg);
}

double Parcel::readDouble() const
{
    return readAligned<double>();
}

#endif

status_t Parcel::readIntPtr(intptr_t *pArg) const
{
    return readAligned(pArg);
}


intptr_t Parcel::readIntPtr() const
{
    return readAligned<intptr_t>();
}


const char* Parcel::readCString() const
{
    const size_t avail = mDataSize-mDataPos;
    if (avail > 0) {
        const char* str = reinterpret_cast<const char*>(mData+mDataPos);
        // is the string's trailing NUL within the parcel's valid bounds?
        const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
        if (eos) {
            const size_t len = eos - str;
            mDataPos += PAD_SIZE(len+1);
            ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos);
            return str;
        }
    }
    return NULL;
}

String8 Parcel::readString8() const
{
    int32_t size = readInt32();
    // watch for potential int overflow adding 1 for trailing NUL
    if (size > 0 && size < INT32_MAX) {
        const char* str = (const char*)readInplace(size+1);
        if (str) return String8(str, size);
    }
    return String8();
}

String16 Parcel::readString16() const
{
    size_t len;
    const char16_t* str = readString16Inplace(&len);
    if (str) return String16(str, len);
    ALOGE("Reading a NULL string not supported here.");
    return String16();
}

const char16_t* Parcel::readString16Inplace(size_t* outLen) const
{
    int32_t size = readInt32();
    // watch for potential int overflow from size+1
    if (size >= 0 && size < INT32_MAX) {
        *outLen = size;
        const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
        if (str != NULL) {
            return str;
        }
    }
    *outLen = 0;
    return NULL;
}

sp<IBinder> Parcel::readStrongBinder() const
{
    sp<IBinder> val;
    unflatten_binder(ProcessState::self(), *this, &val);
    return val;
}

wp<IBinder> Parcel::readWeakBinder() const
{
    wp<IBinder> val;
    unflatten_binder(ProcessState::self(), *this, &val);
    return val;
}

int32_t Parcel::readExceptionCode() const
{
    int32_t exception_code = readAligned<int32_t>();
    if (exception_code == EX_HAS_REPLY_HEADER) {
        int32_t header_start = dataPosition();
        int32_t header_size = readAligned<int32_t>();
        // Skip over fat response headers.  Not used (or propagated) in
        // native code.
        setDataPosition(header_start + header_size);
        // And fat response headers are currently only used when there are no
        // exceptions, so return no error:
        return 0;
    }
    return exception_code;
}

native_handle* Parcel::readNativeHandle() const
{
    int numFds, numInts;
    status_t err;
    err = readInt32(&numFds);
    if (err != NO_ERROR) return 0;
    err = readInt32(&numInts);
    if (err != NO_ERROR) return 0;

    native_handle* h = native_handle_create(numFds, numInts);
    for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
        h->data[i] = dup(readFileDescriptor());
        if (h->data[i] < 0) err = BAD_VALUE;
    }
    err = read(h->data + numFds, sizeof(int)*numInts);
    if (err != NO_ERROR) {
        native_handle_close(h);
        native_handle_delete(h);
        h = 0;
    }
    return h;
}


int Parcel::readFileDescriptor() const
{
    const flat_binder_object* flat = readObject(true);
    if (flat) {
        switch (flat->type) {
            case BINDER_TYPE_FD:
                //ALOGI("Returning file descriptor %ld from parcel %p", flat->handle, this);
                return flat->handle;
        }
    }
    return BAD_TYPE;
}

// WARNING: This method must stay in sync with writeToParcel()
// in frameworks/base/core/java/android/os/ParcelFileDescriptor.java
int Parcel::readParcelFileDescriptor(int& outCommChannel) const {
    int fd;
    outCommChannel = -1;

    if (readInt32() == 0) {
        fd = -1;
    } else {
        fd = readFileDescriptor();
        if (fd >= 0 && readInt32() != 0) {
            outCommChannel = readFileDescriptor();
        }
    }
    return fd;
}

status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const
{
    int32_t useAshmem;
    status_t status = readInt32(&useAshmem);
    if (status) return status;

    if (!useAshmem) {
        ALOGV("readBlob: read in place");
        const void* ptr = readInplace(len);
        if (!ptr) return BAD_VALUE;

        outBlob->init(false /*mapped*/, const_cast<void*>(ptr), len);
        return NO_ERROR;
    }

    ALOGV("readBlob: read from ashmem");
    int fd = readFileDescriptor();
    if (fd == int(BAD_TYPE)) return BAD_VALUE;

    void* ptr = ::mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
    if (ptr == MAP_FAILED) return NO_MEMORY;

    outBlob->init(true /*mapped*/, ptr, len);
    return NO_ERROR;
}

status_t Parcel::read(FlattenableHelperInterface& val) const
{
    // size
    const size_t len = this->readInt32();
    const size_t fd_count = this->readInt32();

    // payload
    void const* const buf = this->readInplace(PAD_SIZE(len));
    if (buf == NULL)
        return BAD_VALUE;

    int* fds = NULL;
    if (fd_count) {
        fds = new int[fd_count];
    }

    status_t err = NO_ERROR;
    for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
        int oldfd = this->readFileDescriptor();
        fds[i] = dup(oldfd);
        if (fds[i] < 0) {
            int dupErrno = errno;
            err = BAD_VALUE;
            int flags = fcntl(oldfd, F_GETFD);
            int fcntlErrno = errno;
            const flat_binder_object* flat = readObject(true);
            ALOGE("dup failed in Parcel::read, fd %zu of %zu\n"
                  "  dup(%d) = %d [errno: %d (%s)]\n"
                  "  fcntl(%d, F_GETFD) = %d [errno: %d (%s)]\n"
                  "  flat %p type %d",
                  i, fd_count,
                  oldfd, fds[i], dupErrno, strerror(dupErrno),
                  oldfd, flags, fcntlErrno, strerror(fcntlErrno),
                  flat, flat ? flat->type : 0);
            CallStack(LOG_TAG);
        }
    }

    if (err == NO_ERROR) {
        err = val.unflatten(buf, len, fds, fd_count);
    }

    if (fd_count) {
        delete [] fds;
    }

    return err;
}
const flat_binder_object* Parcel::readObject(bool nullMetaData) const
{
    const size_t DPOS = mDataPos;
    if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) {
        const flat_binder_object* obj
            = reinterpret_cast<const flat_binder_object*>(mData+DPOS);
        mDataPos = DPOS + sizeof(flat_binder_object);
        if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) {
            // When transferring a NULL object, we don't write it into
            // the object list, so we don't want to check for it when
            // reading.
            ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
            return obj;
        }

        // Ensure that this object is valid...
        binder_size_t* const OBJS = mObjects;
        const size_t N = mObjectsSize;
        size_t opos = mNextObjectHint;

        if (N > 0) {
            ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
                  this, DPOS, opos);

            // Start at the current hint position, looking for an object at
            // the current data position.
            if (opos < N) {
                while (opos < (N-1) && OBJS[opos] < DPOS) {
                    opos++;
                }
            } else {
                opos = N-1;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with forward search",
                      this, DPOS, opos);
                mNextObjectHint = opos+1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                return obj;
            }

            // Look backwards for it...
            while (opos > 0 && OBJS[opos] > DPOS) {
                opos--;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with backward search",
                      this, DPOS, opos);
                mNextObjectHint = opos+1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                return obj;
            }
        }
        ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list",
              this, DPOS);
    }
    return NULL;
}

void Parcel::closeFileDescriptors()
{
    size_t i = mObjectsSize;
    if (i > 0) {
        //ALOGI("Closing file descriptors for %zu objects...", i);
    }
    while (i > 0) {
        i--;
        const flat_binder_object* flat
            = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
        if (flat->type == BINDER_TYPE_FD) {
            //ALOGI("Closing fd: %ld", flat->handle);
            close(flat->handle);
        }
    }
}

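// The ipc*() accessors below expose the raw data and object buffers to the
// IPC layer (e.g. IPCThreadState when it builds a transaction); they are not
// part of the general read/write API.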
uintptr_t Parcel::ipcData() const
{
    return reinterpret_cast<uintptr_t>(mData);
}

size_t Parcel::ipcDataSize() const
{
    return (mDataSize > mDataPos ? mDataSize : mDataPos);
}

uintptr_t Parcel::ipcObjects() const
{
    return reinterpret_cast<uintptr_t>(mObjects);
}

size_t Parcel::ipcObjectsCount() const
{
    return mObjectsSize;
}

void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
    const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
{
    binder_size_t minOffset = 0;
    freeDataNoInit();
    mError = NO_ERROR;
    mData = const_cast<uint8_t*>(data);
    mDataSize = mDataCapacity = dataSize;
    //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid());
    mDataPos = 0;
    ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos);
    mObjects = const_cast<binder_size_t*>(objects);
    mObjectsSize = mObjectsCapacity = objectsCount;
    mNextObjectHint = 0;
    mOwner = relFunc;
    mOwnerCookie = relCookie;
    for (size_t i = 0; i < mObjectsSize; i++) {
        binder_size_t offset = mObjects[i];
        if (offset < minOffset) {
            ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
                  __func__, (uint64_t)offset, (uint64_t)minOffset);
            mObjectsSize = 0;
            break;
        }
        minOffset = offset + sizeof(flat_binder_object);
    }
    scanForFds();
}

void Parcel::print(TextOutput& to, uint32_t /*flags*/) const
{
    to << "Parcel(";

    if (errorCheck() != NO_ERROR) {
        const status_t err = errorCheck();
        to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
    } else if (dataSize() > 0) {
        const uint8_t* DATA = data();
        to << indent << HexDump(DATA, dataSize()) << dedent;
        const binder_size_t* OBJS = objects();
        const size_t N = objectsCount();
        for (size_t i=0; i<N; i++) {
            const flat_binder_object* flat
                = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]);
            to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
                << TypeCode(flat->type & 0x7f7f7f00)
                << " = " << flat->binder;
        }
    } else {
        to << "NULL";
    }

    to << ")";
}

void Parcel::releaseObjects()
{
    const sp<ProcessState> proc(ProcessState::self());
    size_t i = mObjectsSize;
    uint8_t* const data = mData;
    binder_size_t* const objects = mObjects;
    while (i > 0) {
        i--;
        const flat_binder_object* flat
            = reinterpret_cast<flat_binder_object*>(data+objects[i]);
        release_object(proc, *flat, this);
    }
}

void Parcel::acquireObjects()
{
    const sp<ProcessState> proc(ProcessState::self());
    size_t i = mObjectsSize;
    uint8_t* const data = mData;
    binder_size_t* const objects = mObjects;
    while (i > 0) {
        i--;
        const flat_binder_object* flat
            = reinterpret_cast<flat_binder_object*>(data+objects[i]);
        acquire_object(proc, *flat, this);
    }
}

void Parcel::freeData()
{
    freeDataNoInit();
    initState();
}

void Parcel::freeDataNoInit()
{
    if (mOwner) {
        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
    } else {
        releaseObjects();
        if (mData) free(mData);
        if (mObjects) free(mObjects);
    }
}

status_t Parcel::growData(size_t len)
{
    size_t newSize = ((mDataSize+len)*3)/2;
    return (newSize <= mDataSize)
            ? (status_t) NO_MEMORY
            : continueWrite(newSize);
}

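// Note: growData() above grows the buffer to 3/2 of (current size + requested
// additional length); the (newSize <= mDataSize) check rejects requests whose
// size arithmetic has overflowed.
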
status_t Parcel::restartWrite(size_t desired)
{
    if (mOwner) {
        freeData();
        return continueWrite(desired);
    }

    uint8_t* data = (uint8_t*)realloc(mData, desired);
    if (!data && desired > mDataCapacity) {
        mError = NO_MEMORY;
        return NO_MEMORY;
    }

    releaseObjects();

    if (data) {
        mData = data;
        mDataCapacity = desired;
    }

    mDataSize = mDataPos = 0;
    ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
    ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);

    free(mObjects);
    mObjects = NULL;
    mObjectsSize = mObjectsCapacity = 0;
    mNextObjectHint = 0;
    mHasFds = false;
    mFdsKnown = true;
    mAllowFds = true;

    return NO_ERROR;
}

status_t Parcel::continueWrite(size_t desired)
{
    // If shrinking, first adjust for any objects that appear
    // after the new data size.
    size_t objectsSize = mObjectsSize;
    if (desired < mDataSize) {
        if (desired == 0) {
            objectsSize = 0;
        } else {
            while (objectsSize > 0) {
                if (mObjects[objectsSize-1] < desired)
                    break;
                objectsSize--;
            }
        }
    }

    if (mOwner) {
        // If the size is going to zero, just release the owner's data.
        if (desired == 0) {
            freeData();
            return NO_ERROR;
        }

        // If there is a different owner, we need to take
        // possession.
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }
        binder_size_t* objects = NULL;

        if (objectsSize) {
            objects = (binder_size_t*)malloc(objectsSize*sizeof(binder_size_t));
            if (!objects) {
                free(data);

                mError = NO_MEMORY;
                return NO_MEMORY;
            }

            // Little hack to only acquire references on objects
            // we will be keeping.
            size_t oldObjectsSize = mObjectsSize;
            mObjectsSize = objectsSize;
            acquireObjects();
            mObjectsSize = oldObjectsSize;
        }

        if (mData) {
            memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
        }
        if (objects && mObjects) {
            memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t));
        }
        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
        mOwner = NULL;

        mData = data;
        mObjects = objects;
        mDataSize = (mDataSize < desired) ? mDataSize : desired;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        mDataCapacity = desired;
        mObjectsSize = mObjectsCapacity = objectsSize;
        mNextObjectHint = 0;

    } else if (mData) {
        if (objectsSize < mObjectsSize) {
            // Need to release refs on any objects we are dropping.
            const sp<ProcessState> proc(ProcessState::self());
            for (size_t i=objectsSize; i<mObjectsSize; i++) {
                const flat_binder_object* flat
                    = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
                if (flat->type == BINDER_TYPE_FD) {
                    // will need to rescan because we may have lopped off the only FDs
                    mFdsKnown = false;
                }
                release_object(proc, *flat, this);
            }
            binder_size_t* objects =
                (binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t));
            if (objects) {
                mObjects = objects;
            }
            mObjectsSize = objectsSize;
            mNextObjectHint = 0;
        }

        // We own the data, so we can just do a realloc().
        if (desired > mDataCapacity) {
            uint8_t* data = (uint8_t*)realloc(mData, desired);
            if (data) {
                mData = data;
                mDataCapacity = desired;
            } else if (desired > mDataCapacity) {
                mError = NO_MEMORY;
                return NO_MEMORY;
            }
        } else {
            if (mDataSize > desired) {
                mDataSize = desired;
                ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
            }
            if (mDataPos > desired) {
                mDataPos = desired;
                ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
            }
        }

    } else {
        // This is the first data.  Easy!
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }

        if(!(mDataCapacity == 0 && mObjects == NULL
             && mObjectsCapacity == 0)) {
            ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired);
        }

        mData = data;
        mDataSize = mDataPos = 0;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
        mDataCapacity = desired;
    }

    return NO_ERROR;
}

void Parcel::initState()
{
    mError = NO_ERROR;
    mData = 0;
    mDataSize = 0;
    mDataCapacity = 0;
    mDataPos = 0;
    ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
    ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
    mObjects = NULL;
    mObjectsSize = 0;
    mObjectsCapacity = 0;
    mNextObjectHint = 0;
    mHasFds = false;
    mFdsKnown = true;
    mAllowFds = true;
    mOwner = NULL;
}

void Parcel::scanForFds() const
{
    bool hasFds = false;
    for (size_t i=0; i<mObjectsSize; i++) {
        const flat_binder_object* flat
            = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]);
        if (flat->type == BINDER_TYPE_FD) {
            hasFds = true;
            break;
        }
    }
    mHasFds = hasFds;
    mFdsKnown = true;
}

// --- Parcel::Blob ---

Parcel::Blob::Blob() :
        mMapped(false), mData(NULL), mSize(0) {
}

Parcel::Blob::~Blob() {
    release();
}

void Parcel::Blob::release() {
    if (mMapped && mData) {
        ::munmap(mData, mSize);
    }
    clear();
}

void Parcel::Blob::init(bool mapped, void* data, size_t size) {
    mMapped = mapped;
    mData = data;
    mSize = size;
}

void Parcel::Blob::clear() {
    mMapped = false;
    mData = NULL;
    mSize = 0;
}

}; // namespace android
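
// ---------------------------------------------------------------------------
// Illustrative usage (hypothetical, not part of this file): a caller typically
// round-trips data through a Parcel like so:
//
//     Parcel p;
//     p.writeInterfaceToken(String16("com.example.IFoo"));  // RPC header
//     p.writeInt32(42);
//     p.writeString16(String16("hello"));
//
//     p.setDataPosition(0);                                 // rewind to read
//     p.enforceInterface(String16("com.example.IFoo"));
//     int32_t v = p.readInt32();                            // 42
//     String16 s = p.readString16();                        // "hello"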