/*
 * Copyright (C) 2005 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "RefBase"
// #define LOG_NDEBUG 0

// Logging macros, plus the POSIX and libutils pieces the DEBUG_REFS path uses.
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

#include <log/log.h>

#include <utils/RefBase.h>

#include <utils/CallStack.h>
#include <utils/Mutex.h>
#include <utils/String8.h>

#ifndef __unused
#define __unused __attribute__((__unused__))
#endif

// compile with refcounting debugging enabled
#define DEBUG_REFS 0

// whether ref-tracking is enabled by default; if not, trackMe(true, false)
// needs to be called explicitly
#define DEBUG_REFS_ENABLED_BY_DEFAULT 0

// whether call stacks are collected (significantly slows things down)
#define DEBUG_REFS_CALLSTACK_ENABLED 1

// folder where stack traces are saved when DEBUG_REFS is enabled
// this folder needs to exist and be writable
#define DEBUG_REFS_CALLSTACK_PATH "/data/debug"

// log all reference counting operations
#define PRINT_REFS 0
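// Illustrative sketch (not part of this file's logic): with DEBUG_REFS set
// to 1 but DEBUG_REFS_ENABLED_BY_DEFAULT left at 0, a client can opt a
// single object into tracking at runtime and dump what was recorded.
// `obj` is a hypothetical RefBase-derived object:
//
//     obj->getWeakRefs()->trackMe(true /* track */, false /* retain */);
//     // ... acquire and release references ...
//     obj->getWeakRefs()->printRefs();  // also writes a .stack file under
//                                       // DEBUG_REFS_CALLSTACK_PATH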
// ---------------------------------------------------------------------------

namespace android {

// Observations, invariants, etc:

// By default, objects are destroyed when the last strong reference disappears
// or, if the object never had a strong reference, when the last weak reference
// disappears.
//
// OBJECT_LIFETIME_WEAK changes this behavior to retain the object
// unconditionally until the last reference of either kind disappears.  The
// client ensures that the extendObjectLifetime call happens before the dec
// call that would otherwise have deallocated the object, or before an
// attemptIncStrong call that might rely on it.  We do not worry about
// concurrent changes to the object lifetime.
//
// AttemptIncStrong will succeed if the object has a strong reference, or if it
// has a weak reference and has never had a strong reference.
// AttemptIncWeak really does succeed only if there is already a WEAK
// reference, and thus may fail when attemptIncStrong would succeed.
//
// mStrong is the strong reference count.  mWeak is the weak reference count.
// Between calls, and ignoring memory ordering effects, mWeak includes strong
// references, and is thus >= mStrong.
//
// A weakref_impl holds all the information, including both reference counts,
// required to perform wp<> operations.  Thus these can continue to be
// performed after the RefBase object has been destroyed.
//
// A weakref_impl is allocated as the value of mRefs in a RefBase object on
// construction.
// In the OBJECT_LIFETIME_STRONG case, it is normally deallocated in decWeak,
// and hence lives as long as the last weak reference.  (It can also be
// deallocated in the RefBase destructor iff the strong reference count was
// never incremented and the weak count is zero, e.g. if the RefBase object is
// explicitly destroyed without decrementing the strong count.  This should be
// avoided.)  In this case, the RefBase destructor should be invoked from
// decStrong.
// In the OBJECT_LIFETIME_WEAK case, the weakref_impl is always deallocated in
// the RefBase destructor, which is always invoked by decWeak.  DecStrong
// explicitly avoids the deletion in this case.
//
// Memory ordering:
// The client must ensure that every inc() call, together with all other
// accesses to the object, happens before the corresponding dec() call.
//
// We try to keep memory ordering constraints on atomics as weak as possible,
// since memory fences or ordered memory accesses are likely to be a major
// performance cost for this code.  All accesses to mStrong, mWeak, and mFlags
// explicitly relax memory ordering in some way.
//
// The only operations that are not memory_order_relaxed are reference count
// decrements.  All reference count decrements are release operations.  In
// addition, the final decrement leading to the deallocation is followed by an
// acquire fence, which we can view informally as also turning it into an
// acquire operation.  (See 29.8p4 [atomics.fences] for details.  We could
// alternatively use acq_rel operations for all decrements.  This is probably
// slower on most current (2016) hardware, especially on ARMv7, but that may
// not be true indefinitely.)
//
// This convention ensures that the second-to-last decrement synchronizes with
// (in the language of 1.10 in the C++ standard) the final decrement of a
// reference count.  Since reference counts are only updated using atomic
// read-modify-write operations, this also extends to any earlier decrements.
// (See "release sequence" in 1.10.)
//
// Since all operations on an object happen before the corresponding reference
// count decrement, and all reference count decrements happen before the final
// one, we are guaranteed that all other object accesses happen before the
// object is destroyed.
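// A minimal sketch of the two lifetime modes described above, in terms of the
// public sp<>/wp<> wrappers (`Foo` is a hypothetical RefBase subclass; counts
// shown are the logical values after the INITIAL_STRONG_VALUE fixup):
//
//     sp<Foo> s = new Foo();   // mStrong == 1, mWeak == 1
//     wp<Foo> w = s;           // mStrong == 1, mWeak == 2
//     s.clear();               // last strong ref: Foo is destroyed now,
//                              // but its weakref_impl lives on until w
//                              // drops the last weak reference.
//
// Had Foo's constructor called extendObjectLifetime(OBJECT_LIFETIME_WEAK),
// clearing s would not destroy it; destruction would wait for w.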
#define INITIAL_STRONG_VALUE (1<<28)

#define MAX_COUNT 0xfffff

// Test whether the argument is a clearly invalid strong reference count.
// Used only for error checking on the value before an atomic decrement.
// Intended to be very cheap.
// Note that we cannot just check for excess decrements by comparing to zero
// since the object would be deallocated before that.
#define BAD_STRONG(c) \
        ((c) == 0 || ((c) & (~(MAX_COUNT | INITIAL_STRONG_VALUE))) != 0)

// Same for weak counts.
#define BAD_WEAK(c) ((c) == 0 || ((c) & (~MAX_COUNT)) != 0)

// ---------------------------------------------------------------------------

class RefBase::weakref_impl : public RefBase::weakref_type
{
public:
    std::atomic<int32_t>    mStrong;
    std::atomic<int32_t>    mWeak;
    RefBase* const          mBase;
    std::atomic<int32_t>    mFlags;

#if !DEBUG_REFS

    explicit weakref_impl(RefBase* base)
        : mStrong(INITIAL_STRONG_VALUE)
        , mWeak(0)
        , mBase(base)
        , mFlags(0)
    {
    }

    void addStrongRef(const void* /*id*/) { }
    void removeStrongRef(const void* /*id*/) { }
    void renameStrongRefId(const void* /*old_id*/, const void* /*new_id*/) { }
    void addWeakRef(const void* /*id*/) { }
    void removeWeakRef(const void* /*id*/) { }
    void renameWeakRefId(const void* /*old_id*/, const void* /*new_id*/) { }
    void printRefs() const { }
    void trackMe(bool, bool) { }

#else

    weakref_impl(RefBase* base)
        : mStrong(INITIAL_STRONG_VALUE)
        , mWeak(0)
        , mBase(base)
        , mFlags(0)
        , mStrongRefs(NULL)
        , mWeakRefs(NULL)
        , mTrackEnabled(!!DEBUG_REFS_ENABLED_BY_DEFAULT)
        , mRetain(false)
    {
    }

    ~weakref_impl()
    {
        bool dumpStack = false;
        if (!mRetain && mStrongRefs != NULL) {
            dumpStack = true;
            ALOGE("Strong references remain:");
            ref_entry* refs = mStrongRefs;
            while (refs) {
                char inc = refs->ref >= 0 ? '+' : '-';
                ALOGD("\t%c ID %p (ref %d):", inc, refs->id, refs->ref);
#if DEBUG_REFS_CALLSTACK_ENABLED
                refs->stack.log(LOG_TAG);
#endif
                refs = refs->next;
            }
        }

        if (!mRetain && mWeakRefs != NULL) {
            dumpStack = true;
            ALOGE("Weak references remain!");
            ref_entry* refs = mWeakRefs;
            while (refs) {
                char inc = refs->ref >= 0 ? '+' : '-';
                ALOGD("\t%c ID %p (ref %d):", inc, refs->id, refs->ref);
#if DEBUG_REFS_CALLSTACK_ENABLED
                refs->stack.log(LOG_TAG);
#endif
                refs = refs->next;
            }
        }
        if (dumpStack) {
            ALOGE("above errors at:");
            CallStack stack(LOG_TAG);
        }
    }

    void addStrongRef(const void* id) {
        //ALOGD_IF(mTrackEnabled,
        //        "addStrongRef: RefBase=%p, id=%p", mBase, id);
        addRef(&mStrongRefs, id, mStrong.load(std::memory_order_relaxed));
    }

    void removeStrongRef(const void* id) {
        //ALOGD_IF(mTrackEnabled,
        //        "removeStrongRef: RefBase=%p, id=%p", mBase, id);
        if (!mRetain) {
            removeRef(&mStrongRefs, id);
        } else {
            addRef(&mStrongRefs, id, -mStrong.load(std::memory_order_relaxed));
        }
    }

    void renameStrongRefId(const void* old_id, const void* new_id) {
        //ALOGD_IF(mTrackEnabled,
        //        "renameStrongRefId: RefBase=%p, oid=%p, nid=%p",
        //        mBase, old_id, new_id);
        renameRefsId(mStrongRefs, old_id, new_id);
    }

    void addWeakRef(const void* id) {
        addRef(&mWeakRefs, id, mWeak.load(std::memory_order_relaxed));
    }

    void removeWeakRef(const void* id) {
        if (!mRetain) {
            removeRef(&mWeakRefs, id);
        } else {
            addRef(&mWeakRefs, id, -mWeak.load(std::memory_order_relaxed));
        }
    }

    void renameWeakRefId(const void* old_id, const void* new_id) {
        renameRefsId(mWeakRefs, old_id, new_id);
    }

    void trackMe(bool track, bool retain)
    {
        mTrackEnabled = track;
        mRetain = retain;
    }

    void printRefs() const
    {
        String8 text;

        {
            Mutex::Autolock _l(mMutex);
            char buf[128];
            snprintf(buf, sizeof(buf),
                     "Strong references on RefBase %p (weakref_type %p):\n",
                     mBase, this);
            text.append(buf);
            printRefsLocked(&text, mStrongRefs);
            snprintf(buf, sizeof(buf),
                     "Weak references on RefBase %p (weakref_type %p):\n",
                     mBase, this);
            text.append(buf);
            printRefsLocked(&text, mWeakRefs);
        }

        {
            char name[100];
            snprintf(name, sizeof(name), DEBUG_REFS_CALLSTACK_PATH "/%p.stack",
                     this);
            // Mode must be octal (rw-r--r--), not decimal 644.
            int rc = open(name, O_RDWR | O_CREAT | O_APPEND, 0644);
            if (rc >= 0) {
                write(rc, text.string(), text.length());
                close(rc);
                ALOGD("STACK TRACE for %p saved in %s", this, name);
            }
            else ALOGE("FAILED TO PRINT STACK TRACE for %p in %s: %s", this,
                       name, strerror(errno));
        }
    }

private:
    struct ref_entry
    {
        ref_entry* next;
        const void* id;
#if DEBUG_REFS_CALLSTACK_ENABLED
        CallStack stack;
#endif
        int32_t ref;
    };

    void addRef(ref_entry** refs, const void* id, int32_t mRef)
    {
        if (mTrackEnabled) {
            AutoMutex _l(mMutex);

            ref_entry* ref = new ref_entry;
            // Reference count at the time of the snapshot, but before the
            // update.  A positive value means we incremented the count; a
            // negative value means we decremented it.
            ref->ref = mRef;
            ref->id = id;
#if DEBUG_REFS_CALLSTACK_ENABLED
            ref->stack.update(2);
#endif
            ref->next = *refs;
            *refs = ref;
        }
    }

    void removeRef(ref_entry** refs, const void* id)
    {
        if (mTrackEnabled) {
            AutoMutex _l(mMutex);

            ref_entry* const head = *refs;
            ref_entry* ref = head;
            while (ref != NULL) {
                if (ref->id == id) {
                    *refs = ref->next;
                    delete ref;
                    return;
                }
                refs = &ref->next;
                ref = *refs;
            }

            ALOGE("RefBase: removing id %p on RefBase %p "
                  "(weakref_type %p) that doesn't exist!",
                  id, mBase, this);

            ref = head;
            while (ref) {
                char inc = ref->ref >= 0 ? '+' : '-';
                ALOGD("\t%c ID %p (ref %d):", inc, ref->id, ref->ref);
                ref = ref->next;
            }

            CallStack stack(LOG_TAG);
        }
    }

    void renameRefsId(ref_entry* r, const void* old_id, const void* new_id)
    {
        if (mTrackEnabled) {
            AutoMutex _l(mMutex);
            ref_entry* ref = r;
            while (ref != NULL) {
                if (ref->id == old_id) {
                    ref->id = new_id;
                }
                ref = ref->next;
            }
        }
    }

    void printRefsLocked(String8* out, const ref_entry* refs) const
    {
        char buf[128];
        while (refs) {
            char inc = refs->ref >= 0 ? '+' : '-';
            snprintf(buf, sizeof(buf), "\t%c ID %p (ref %d):\n",
                     inc, refs->id, refs->ref);
            out->append(buf);
#if DEBUG_REFS_CALLSTACK_ENABLED
            out->append(refs->stack.toString("\t\t"));
#else
            out->append("\t\t(call stacks disabled)");
#endif
            refs = refs->next;
        }
    }

    mutable Mutex mMutex;
    ref_entry* mStrongRefs;
    ref_entry* mWeakRefs;

    bool mTrackEnabled;
    // When true, keep collecting stack traces on addref and removeref instead
    // of deleting the matching entries on removeref.
    bool mRetain;

#endif
};

// ---------------------------------------------------------------------------

void RefBase::incStrong(const void* id) const
{
    weakref_impl* const refs = mRefs;
    refs->incWeak(id);

    refs->addStrongRef(id);
    const int32_t c = refs->mStrong.fetch_add(1, std::memory_order_relaxed);
    ALOG_ASSERT(c > 0, "incStrong() called on %p after last strong ref", refs);
#if PRINT_REFS
    ALOGD("incStrong of %p from %p: cnt=%d\n", this, id, c);
#endif
    if (c != INITIAL_STRONG_VALUE)  {
        return;
    }

    int32_t old __unused = refs->mStrong.fetch_sub(INITIAL_STRONG_VALUE,
            std::memory_order_relaxed);
    // A decStrong() must still happen after us.
    ALOG_ASSERT(old > INITIAL_STRONG_VALUE, "0x%x too small", old);
    refs->mBase->onFirstRef();
}
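// Illustrative sketch: clients normally never call incStrong()/decStrong()
// directly; sp<> pairs them, passing its own address as the id so DEBUG_REFS
// can attribute each reference to a holder.  Roughly (a hypothetical
// simplification of the sp<> code in RefBase.h / StrongPointer.h):
//
//     template<typename T> sp<T>::sp(T* other) : m_ptr(other) {
//         if (other) other->incStrong(this);
//     }
//     template<typename T> sp<T>::~sp() {
//         if (m_ptr) m_ptr->decStrong(this);
//     }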
void RefBase::decStrong(const void* id) const
{
    weakref_impl* const refs = mRefs;
    refs->removeStrongRef(id);
    const int32_t c = refs->mStrong.fetch_sub(1, std::memory_order_release);
#if PRINT_REFS
    ALOGD("decStrong of %p from %p: cnt=%d\n", this, id, c);
#endif
    LOG_ALWAYS_FATAL_IF(BAD_STRONG(c), "decStrong() called on %p too many times",
            refs);
    if (c == 1) {
        std::atomic_thread_fence(std::memory_order_acquire);
        refs->mBase->onLastStrongRef(id);
        int32_t flags = refs->mFlags.load(std::memory_order_relaxed);
        if ((flags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
            delete this;
            // The destructor does not delete refs in this case.
        }
    }
    // Note that even with only strong reference operations, the thread
    // deallocating this may not be the same as the thread deallocating refs.
    // That's OK: all accesses to this happen before its deletion here,
    // and all accesses to refs happen before its deletion in the final decWeak.
    // The destructor can safely access mRefs because either it's deleting
    // mRefs itself, or it's running entirely before the final mWeak decrement.
    //
    // Since we're doing atomic loads of `flags`, the static analyzer assumes
    // they can change between `delete this;` and `refs->decWeak(id);`.  This
    // is not the case.  The analyzer may become more okay with this pattern
    // when https://bugs.llvm.org/show_bug.cgi?id=34365 gets resolved.
    // NOLINTNEXTLINE
    refs->decWeak(id);
}

void RefBase::forceIncStrong(const void* id) const
{
    // Allows initial mStrong of 0 in addition to INITIAL_STRONG_VALUE.
    // TODO: Better document assumptions.
    weakref_impl* const refs = mRefs;
    refs->incWeak(id);

    refs->addStrongRef(id);
    const int32_t c = refs->mStrong.fetch_add(1, std::memory_order_relaxed);
    ALOG_ASSERT(c >= 0, "forceIncStrong called on %p after ref count underflow",
               refs);
#if PRINT_REFS
    ALOGD("forceIncStrong of %p from %p: cnt=%d\n", this, id, c);
#endif

    switch (c) {
    case INITIAL_STRONG_VALUE:
        refs->mStrong.fetch_sub(INITIAL_STRONG_VALUE,
                std::memory_order_relaxed);
        // fall through...
    case 0:
        refs->mBase->onFirstRef();
    }
}

int32_t RefBase::getStrongCount() const
{
    // Debugging only; no memory ordering guarantees.
    return mRefs->mStrong.load(std::memory_order_relaxed);
}

RefBase* RefBase::weakref_type::refBase() const
{
    return static_cast<const weakref_impl*>(this)->mBase;
}

void RefBase::weakref_type::incWeak(const void* id)
{
    weakref_impl* const impl = static_cast<weakref_impl*>(this);
    impl->addWeakRef(id);
    const int32_t c __unused = impl->mWeak.fetch_add(1,
            std::memory_order_relaxed);
    ALOG_ASSERT(c >= 0, "incWeak called on %p after last weak ref", this);
}
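// Illustrative sketch: wp<> is the usual caller of this incWeak()/decWeak()
// pair; it obtains the weakref_type from createWeak() below, which already
// takes the weak reference on its behalf (a hypothetical simplification of
// the wp<> code in RefBase.h):
//
//     template<typename T> wp<T>::wp(T* other) : m_ptr(other) {
//         if (other) m_refs = other->createWeak(this);  // weak +1
//     }
//     template<typename T> wp<T>::~wp() {
//         if (m_ptr) m_refs->decWeak(this);             // weak -1
//     }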
void RefBase::weakref_type::decWeak(const void* id)
{
    weakref_impl* const impl = static_cast<weakref_impl*>(this);
    impl->removeWeakRef(id);
    const int32_t c = impl->mWeak.fetch_sub(1, std::memory_order_release);
    LOG_ALWAYS_FATAL_IF(BAD_WEAK(c), "decWeak called on %p too many times",
            this);
    if (c != 1) return;
    std::atomic_thread_fence(std::memory_order_acquire);

    int32_t flags = impl->mFlags.load(std::memory_order_relaxed);
    if ((flags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
        // This is the regular lifetime case.  The object is destroyed
        // when the last strong reference goes away.  Since weakref_impl
        // outlives the object, it is not destroyed in the dtor, and
        // we'll have to do it here.
        if (impl->mStrong.load(std::memory_order_relaxed)
                == INITIAL_STRONG_VALUE) {
            // Decrementing a weak count to zero when the object never had a
            // strong reference.  We assume it acquired a weak reference early,
            // e.g. in the constructor, and will eventually be properly
            // destroyed, usually via incrementing and decrementing the strong
            // count.  Thus we no longer do anything here.  We log this case,
            // since it seems to be extremely rare, and should not normally
            // occur.  We used to deallocate mBase here, so this may now
            // indicate a leak.
            ALOGW("RefBase: Object at %p lost last weak reference "
                  "before it had a strong reference", impl->mBase);
        } else {
            // ALOGV("Freeing refs %p of old RefBase %p\n", this, impl->mBase);
            delete impl;
        }
    } else {
        // This is the OBJECT_LIFETIME_WEAK case.  The last weak reference
        // is gone, so we can destroy the object.
        impl->mBase->onLastWeakRef(id);
        delete impl->mBase;
    }
}
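// Illustrative sketch: wp<>::promote() is the main client of
// attemptIncStrong() below.  On success the caller owns a new strong
// reference (plus the weak reference incWeak() takes on its behalf); on
// failure everything is rolled back via decWeak(), so nothing is owned.
// `Foo` and `refs` are hypothetical:
//
//     sp<Foo> strong;
//     if (refs->attemptIncStrong(&strong)) {
//         // object was still alive (or agreed to be revived)
//     } else {
//         // object already destroyed; promotion yields a null sp<>
//     }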
bool RefBase::weakref_type::attemptIncStrong(const void* id)
{
    incWeak(id);

    weakref_impl* const impl = static_cast<weakref_impl*>(this);
    int32_t curCount = impl->mStrong.load(std::memory_order_relaxed);

    ALOG_ASSERT(curCount >= 0,
            "attemptIncStrong called on %p after underflow", this);

    while (curCount > 0 && curCount != INITIAL_STRONG_VALUE) {
        // we're in the easy/common case of promoting a weak reference
        // from an existing strong reference.
        if (impl->mStrong.compare_exchange_weak(curCount, curCount+1,
                std::memory_order_relaxed)) {
            break;
        }
        // the strong count has changed on us, we need to re-assert our
        // situation.  curCount was updated by compare_exchange_weak.
    }

    if (curCount <= 0 || curCount == INITIAL_STRONG_VALUE) {
        // we're now in the harder case of either:
        // - there never was a strong reference on us
        // - or, all strong references have been released
        int32_t flags = impl->mFlags.load(std::memory_order_relaxed);
        if ((flags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
            // this object has a "normal" life-time, i.e.: it gets destroyed
            // when the last strong reference goes away
            if (curCount <= 0) {
                // the last strong reference got released, the object cannot
                // be revived.
                decWeak(id);
                return false;
            }

            // here, curCount == INITIAL_STRONG_VALUE, which means
            // there never was a strong reference, so we can try to
            // promote this object; we need to do that atomically.
            while (curCount > 0) {
                if (impl->mStrong.compare_exchange_weak(curCount, curCount+1,
                        std::memory_order_relaxed)) {
                    break;
                }
                // the strong count has changed on us, we need to re-assert our
                // situation (e.g.: another thread has inc/decStrong'ed us)
                // curCount has been updated.
            }

            if (curCount <= 0) {
                // promote() failed, some other thread destroyed us in the
                // meantime (i.e.: strong count reached zero).
                decWeak(id);
                return false;
            }
        } else {
            // this object has an "extended" life-time, i.e.: it can be
            // revived from a weak reference only.
            // Ask the object's implementation if it agrees to be revived.
            if (!impl->mBase->onIncStrongAttempted(FIRST_INC_STRONG, id)) {
                // it didn't, so give up.
                decWeak(id);
                return false;
            }
            // grab a strong reference, which is always safe due to the
            // extended life-time.
            curCount = impl->mStrong.fetch_add(1, std::memory_order_relaxed);
            // If the strong reference count has already been incremented by
            // someone else, the implementor of onIncStrongAttempted() is
            // holding an unneeded reference.  So call onLastStrongRef() here
            // to remove it.  (No, this is not pretty.)  Note that we MUST NOT
            // do this if we are in fact acquiring the first reference.
            if (curCount != 0 && curCount != INITIAL_STRONG_VALUE) {
                impl->mBase->onLastStrongRef(id);
            }
        }
    }

    impl->addStrongRef(id);

#if PRINT_REFS
    ALOGD("attemptIncStrong of %p from %p: cnt=%d\n", this, id, curCount);
#endif

    // curCount is the value of mStrong before we incremented it.
    // Now we need to fix up the count if it was INITIAL_STRONG_VALUE.
    // This must be done safely, i.e.: handle the case where several threads
    // were here in attemptIncStrong().
    // curCount > INITIAL_STRONG_VALUE is OK, and can happen if we're doing
    // this in the middle of another incStrong.  The subtraction is handled
    // by the thread that started with INITIAL_STRONG_VALUE.
    if (curCount == INITIAL_STRONG_VALUE) {
        impl->mStrong.fetch_sub(INITIAL_STRONG_VALUE,
                std::memory_order_relaxed);
    }

    return true;
}

bool RefBase::weakref_type::attemptIncWeak(const void* id)
{
    weakref_impl* const impl = static_cast<weakref_impl*>(this);

    int32_t curCount = impl->mWeak.load(std::memory_order_relaxed);
    ALOG_ASSERT(curCount >= 0, "attemptIncWeak called on %p after underflow",
               this);
    while (curCount > 0) {
        if (impl->mWeak.compare_exchange_weak(curCount, curCount+1,
                std::memory_order_relaxed)) {
            break;
        }
        // curCount has been updated.
    }

    if (curCount > 0) {
        impl->addWeakRef(id);
    }

    return curCount > 0;
}

int32_t RefBase::weakref_type::getWeakCount() const
{
    // Debug only!
    return static_cast<const weakref_impl*>(this)->mWeak
            .load(std::memory_order_relaxed);
}

void RefBase::weakref_type::printRefs() const
{
    static_cast<const weakref_impl*>(this)->printRefs();
}

void RefBase::weakref_type::trackMe(bool enable, bool retain)
{
    static_cast<weakref_impl*>(this)->trackMe(enable, retain);
}

RefBase::weakref_type* RefBase::createWeak(const void* id) const
{
    mRefs->incWeak(id);
    return mRefs;
}

RefBase::weakref_type* RefBase::getWeakRefs() const
{
    return mRefs;
}

RefBase::RefBase()
    : mRefs(new weakref_impl(this))
{
}

RefBase::~RefBase()
{
    int32_t flags = mRefs->mFlags.load(std::memory_order_relaxed);
    // If the life-time of this object has been extended to WEAK, the
    // weakref_impl doesn't out-live the object, so we can free it now.
    if ((flags & OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_WEAK) {
        // It's possible that the weak count is not 0 if the object
        // re-acquired a weak reference in its destructor.
        if (mRefs->mWeak.load(std::memory_order_relaxed) == 0) {
            delete mRefs;
        }
    } else if (mRefs->mStrong.load(std::memory_order_relaxed)
            == INITIAL_STRONG_VALUE) {
        // We never acquired a strong reference on this object.
        LOG_ALWAYS_FATAL_IF(mRefs->mWeak.load() != 0,
                "RefBase: Explicit destruction with non-zero weak "
                "reference count");
        // TODO: Always report if we get here.  Currently MediaMetadataRetriever
        // C++ objects are inconsistently managed and sometimes get here.
        // There may be other cases, but we believe they should all be fixed.
        delete mRefs;
    }
    // For debugging purposes, clear mRefs.  Ineffective against outstanding
    // wp's.
    const_cast<weakref_impl*&>(mRefs) = NULL;
}

void RefBase::extendObjectLifetime(int32_t mode)
{
    // Must be happens-before ordered with respect to construction or any
    // operation that could destroy the object.
    mRefs->mFlags.fetch_or(mode, std::memory_order_relaxed);
}
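// Illustrative sketch: a hypothetical subclass opting into the extended
// lifetime discussed above.  It stays allocated until the last reference of
// either kind disappears, and can be revived from a wp<> in the meantime:
//
//     class Cache : public RefBase {
//     public:
//         Cache() { extendObjectLifetime(OBJECT_LIFETIME_WEAK); }
//     protected:
//         // Consent to revival attempts; see attemptIncStrong() above.
//         virtual bool onIncStrongAttempted(uint32_t /*flags*/,
//                                           const void* /*id*/) {
//             return true;
//         }
//     };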
void RefBase::onFirstRef()
{
}

void RefBase::onLastStrongRef(const void* /*id*/)
{
}

bool RefBase::onIncStrongAttempted(uint32_t flags, const void* /*id*/)
{
    return (flags&FIRST_INC_STRONG) ? true : false;
}

void RefBase::onLastWeakRef(const void* /*id*/)
{
}

// ---------------------------------------------------------------------------

#if DEBUG_REFS
void RefBase::renameRefs(size_t n, const ReferenceRenamer& renamer) {
    for (size_t i=0 ; i<n ; i++) {
        renamer(i);
    }
}
#else
void RefBase::renameRefs(size_t /*n*/, const ReferenceRenamer& /*renamer*/) { }
#endif

void RefBase::renameRefId(weakref_type* ref,
        const void* old_id, const void* new_id) {
    weakref_impl* const impl = static_cast<weakref_impl*>(ref);
    impl->renameStrongRefId(old_id, new_id);
    impl->renameWeakRefId(old_id, new_id);
}

void RefBase::renameRefId(RefBase* ref,
        const void* old_id, const void* new_id) {
    ref->mRefs->renameStrongRefId(old_id, new_id);
    ref->mRefs->renameWeakRefId(old_id, new_id);
}

} // namespace android