/*
 * Copyright (C) 2005 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "RefBase"
// #define LOG_NDEBUG 0

#include <memory>

#include <android-base/macros.h>

#include <log/log.h>

#include <utils/RefBase.h>

#include <utils/Mutex.h>

#ifndef __unused
#define __unused __attribute__((__unused__))
#endif

// Compile with refcounting debugging enabled.
#define DEBUG_REFS 0

// The following three are ignored unless DEBUG_REFS is set.

// Whether ref-tracking is enabled by default. If not, trackMe(true, false)
// needs to be called explicitly.
#define DEBUG_REFS_ENABLED_BY_DEFAULT 0

// Whether call stacks are collected (significantly slows things down).
#define DEBUG_REFS_CALLSTACK_ENABLED 1

// Folder where stack traces are saved when DEBUG_REFS is enabled.
// This folder needs to exist and be writable.
#define DEBUG_REFS_CALLSTACK_PATH "/data/debug"

// Log all reference counting operations.
#define PRINT_REFS 0

// Continue after logging a stack trace if ~RefBase discovers that the
// reference count has never been incremented. Normally we conspicuously crash
// in that case.
#define DEBUG_REFBASE_DESTRUCTION 1

#if !defined(_WIN32) && !defined(__APPLE__)
// CallStack is only supported on linux type platforms.
#define CALLSTACK_ENABLED 1
#else
#define CALLSTACK_ENABLED 0
#endif

#if CALLSTACK_ENABLED
#include <utils/CallStack.h>
#endif

// ---------------------------------------------------------------------------

namespace android {

// Observations, invariants, etc:

// By default, objects are destroyed when the last strong reference disappears
// or, if the object never had a strong reference, when the last weak reference
// disappears.
//
// OBJECT_LIFETIME_WEAK changes this behavior to retain the object
// unconditionally until the last reference of either kind disappears. The
// client ensures that the extendObjectLifetime call happens before the dec
// call that would otherwise have deallocated the object, or before an
// attemptIncStrong call that might rely on it. We do not worry about
// concurrent changes to the object lifetime.
//
// AttemptIncStrong will succeed if the object has a strong reference, or if it
// has a weak reference and has never had a strong reference.
// AttemptIncWeak really does succeed only if there is already a WEAK
// reference, and thus may fail when attemptIncStrong would succeed.
//
// mStrong is the strong reference count. mWeak is the weak reference count.
// Between calls, and ignoring memory ordering effects, mWeak includes strong
// references, and is thus >= mStrong.
//
// A weakref_impl holds all the information, including both reference counts,
// required to perform wp<> operations. Thus these can continue to be performed
// after the RefBase object has been destroyed.
//
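// Illustrative example of the default (OBJECT_LIFETIME_STRONG) behavior
// described above. MyObject is a hypothetical RefBase subclass, not part of
// this file; the counts shown are the logical values between calls:
//
//     sp<MyObject> s(new MyObject());   // incStrong(): mStrong 1, mWeak 1
//     wp<MyObject> w = s;               // createWeak()/incWeak(): mWeak 2
//     s.clear();                        // decStrong(): mStrong 0, object
//                                       // destroyed, mWeak 1
//     sp<MyObject> s2 = w.promote();    // attemptIncStrong() fails, s2 == nullptr
//     w.clear();                        // decWeak(): mWeak 0, weakref_impl freed
//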
// A weakref_impl is allocated as the value of mRefs in a RefBase object on
// construction.
// In the OBJECT_LIFETIME_STRONG case, it is normally deallocated in decWeak,
// and hence lives as long as the last weak reference. (It can also be
// deallocated in the RefBase destructor iff the strong reference count was
// never incremented and the weak count is zero, e.g. if the RefBase object is
// explicitly destroyed without decrementing the strong count. This should be
// avoided.) In this case, the RefBase destructor should be invoked from
// decStrong.
// In the OBJECT_LIFETIME_WEAK case, the weakref_impl is always deallocated in
// the RefBase destructor, which is always invoked by decWeak. DecStrong
// explicitly avoids the deletion in this case.
//
// Memory ordering:
// The client must ensure that every inc() call, together with all other
// accesses to the object, happens before the corresponding dec() call.
//
// We try to keep memory ordering constraints on atomics as weak as possible,
// since memory fences or ordered memory accesses are likely to be a major
// performance cost for this code. All accesses to mStrong, mWeak, and mFlags
// explicitly relax memory ordering in some way.
//
// The only operations that are not memory_order_relaxed are reference count
// decrements. All reference count decrements are release operations. In
// addition, the final decrement leading to the deallocation is followed by an
// acquire fence, which we can view informally as also turning it into an
// acquire operation. (See 29.8p4 [atomics.fences] for details. We could
// alternatively use acq_rel operations for all decrements. This is probably
// slower on most current (2016) hardware, especially on ARMv7, but that may
// not be true indefinitely.)
//
// This convention ensures that the second-to-last decrement synchronizes with
// (in the language of 1.10 in the C++ standard) the final decrement of a
// reference count. Since reference counts are only updated using atomic
// read-modify-write operations, this also extends to any earlier decrements.
// (See "release sequence" in 1.10.)
//
// Since all operations on an object happen before the corresponding reference
// count decrement, and all reference count decrements happen before the final
// one, we are guaranteed that all other object accesses happen before the
// object is destroyed.


#define INITIAL_STRONG_VALUE (1<<28)

#define MAX_COUNT 0xfffff

// Test whether the argument is a clearly invalid strong reference count.
// Used only for error checking on the value before an atomic decrement.
// Intended to be very cheap.
// Note that we cannot just check for excess decrements by comparing to zero
// since the object would be deallocated before that.
#define BAD_STRONG(c) \
        ((c) == 0 || ((c) & (~(MAX_COUNT | INITIAL_STRONG_VALUE))) != 0)

// Same for weak counts.
#define BAD_WEAK(c) ((c) == 0 || ((c) & (~MAX_COUNT)) != 0)

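// Worked examples (illustrative only): the argument is the count observed just
// before the decrement, so with MAX_COUNT = 0xfffff and
// INITIAL_STRONG_VALUE = 0x10000000,
//     BAD_STRONG(1)          is false: a normal final decrement.
//     BAD_STRONG(0)          is true:  one decrement too many; the object
//                                      would already have been deleted.
//     BAD_STRONG(0x00200000) is true:  bits above MAX_COUNT that are not the
//                                      INITIAL_STRONG_VALUE bias suggest
//                                      wrap-around or memory corruption.
//     BAD_WEAK(INITIAL_STRONG_VALUE) is true: weak counts never carry the
//                                      INITIAL_STRONG_VALUE bias.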

// ---------------------------------------------------------------------------

class RefBase::weakref_impl : public RefBase::weakref_type
{
public:
    std::atomic<int32_t> mStrong;
    std::atomic<int32_t> mWeak;
    RefBase* const mBase;
    std::atomic<int32_t> mFlags;

#if !DEBUG_REFS

    explicit weakref_impl(RefBase* base)
        : mStrong(INITIAL_STRONG_VALUE)
        , mWeak(0)
        , mBase(base)
        , mFlags(0)
    {
    }

    void addStrongRef(const void* /*id*/) { }
    void removeStrongRef(const void* /*id*/) { }
    void renameStrongRefId(const void* /*old_id*/, const void* /*new_id*/) { }
    void addWeakRef(const void* /*id*/) { }
    void removeWeakRef(const void* /*id*/) { }
    void renameWeakRefId(const void* /*old_id*/, const void* /*new_id*/) { }
    void printRefs() const { }
    void trackMe(bool, bool) { }

#else

    weakref_impl(RefBase* base)
        : mStrong(INITIAL_STRONG_VALUE)
        , mWeak(0)
        , mBase(base)
        , mFlags(0)
        , mStrongRefs(NULL)
        , mWeakRefs(NULL)
        , mTrackEnabled(!!DEBUG_REFS_ENABLED_BY_DEFAULT)
        , mRetain(false)
    {
    }

    ~weakref_impl()
    {
        bool dumpStack = false;
        if (!mRetain && mStrongRefs != NULL) {
            dumpStack = true;
            ALOGE("Strong references remain:");
            ref_entry* refs = mStrongRefs;
            while (refs) {
                char inc = refs->ref >= 0 ? '+' : '-';
                ALOGD("\t%c ID %p (ref %d):", inc, refs->id, refs->ref);
#if DEBUG_REFS_CALLSTACK_ENABLED && CALLSTACK_ENABLED
                CallStack::logStack(LOG_TAG, refs->stack.get());
#endif
                refs = refs->next;
            }
        }

        if (!mRetain && mWeakRefs != NULL) {
            dumpStack = true;
            ALOGE("Weak references remain!");
            ref_entry* refs = mWeakRefs;
            while (refs) {
                char inc = refs->ref >= 0 ? '+' : '-';
                ALOGD("\t%c ID %p (ref %d):", inc, refs->id, refs->ref);
#if DEBUG_REFS_CALLSTACK_ENABLED && CALLSTACK_ENABLED
                CallStack::logStack(LOG_TAG, refs->stack.get());
#endif
                refs = refs->next;
            }
        }
        if (dumpStack) {
            ALOGE("above errors at:");
#if CALLSTACK_ENABLED
            CallStack::logStack(LOG_TAG);
#endif
        }
    }

    void addStrongRef(const void* id) {
        //ALOGD_IF(mTrackEnabled,
        //        "addStrongRef: RefBase=%p, id=%p", mBase, id);
        addRef(&mStrongRefs, id, mStrong.load(std::memory_order_relaxed));
    }

    void removeStrongRef(const void* id) {
        //ALOGD_IF(mTrackEnabled,
        //        "removeStrongRef: RefBase=%p, id=%p", mBase, id);
        if (!mRetain) {
            removeRef(&mStrongRefs, id);
        } else {
            addRef(&mStrongRefs, id, -mStrong.load(std::memory_order_relaxed));
        }
    }

    void renameStrongRefId(const void* old_id, const void* new_id) {
        //ALOGD_IF(mTrackEnabled,
        //        "renameStrongRefId: RefBase=%p, oid=%p, nid=%p",
        //        mBase, old_id, new_id);
        renameRefsId(mStrongRefs, old_id, new_id);
    }

    void addWeakRef(const void* id) {
        addRef(&mWeakRefs, id, mWeak.load(std::memory_order_relaxed));
    }

    void removeWeakRef(const void* id) {
        if (!mRetain) {
            removeRef(&mWeakRefs, id);
        } else {
            addRef(&mWeakRefs, id, -mWeak.load(std::memory_order_relaxed));
        }
    }

    void renameWeakRefId(const void* old_id, const void* new_id) {
        renameRefsId(mWeakRefs, old_id, new_id);
    }

    void trackMe(bool track, bool retain) {
        mTrackEnabled = track;
        mRetain = retain;
    }

    void printRefs() const
    {
        String8 text;

        {
            Mutex::Autolock _l(mMutex);
            char buf[128];
            snprintf(buf, sizeof(buf),
                     "Strong references on RefBase %p (weakref_type %p):\n",
                     mBase, this);
            text.append(buf);
            printRefsLocked(&text, mStrongRefs);
            snprintf(buf, sizeof(buf),
                     "Weak references on RefBase %p (weakref_type %p):\n",
                     mBase, this);
            text.append(buf);
            printRefsLocked(&text, mWeakRefs);
        }

        {
            char name[100];
            snprintf(name, sizeof(name), DEBUG_REFS_CALLSTACK_PATH "/%p.stack",
                     this);
            int rc = open(name, O_RDWR | O_CREAT | O_APPEND, 0644);
            if (rc >= 0) {
                (void)write(rc, text.string(), text.length());
                close(rc);
                ALOGD("STACK TRACE for %p saved in %s", this, name);
            }
            else ALOGE("FAILED TO PRINT STACK TRACE for %p in %s: %s", this,
                       name, strerror(errno));
        }
    }

private:
    struct ref_entry
    {
        ref_entry* next;
        const void* id;
#if DEBUG_REFS_CALLSTACK_ENABLED && CALLSTACK_ENABLED
        CallStack::CallStackUPtr stack;
#endif
        int32_t ref;
    };

    void addRef(ref_entry** refs, const void* id, int32_t mRef)
    {
        if (mTrackEnabled) {
            AutoMutex _l(mMutex);

            ref_entry* ref = new ref_entry;
            // Reference count at the time of the snapshot, but before the
            // update. A positive value means we increment, a negative value
            // means we decrement the reference count.
            ref->ref = mRef;
            ref->id = id;
#if DEBUG_REFS_CALLSTACK_ENABLED && CALLSTACK_ENABLED
            ref->stack = CallStack::getCurrent(2);
#endif
            ref->next = *refs;
            *refs = ref;
        }
    }

    void removeRef(ref_entry** refs, const void* id)
    {
        if (mTrackEnabled) {
            AutoMutex _l(mMutex);

            ref_entry* const head = *refs;
            ref_entry* ref = head;
            while (ref != NULL) {
                if (ref->id == id) {
                    *refs = ref->next;
                    delete ref;
                    return;
                }
                refs = &ref->next;
                ref = *refs;
            }

            ALOGE("RefBase: removing id %p on RefBase %p "
                  "(weakref_type %p) that doesn't exist!",
                  id, mBase, this);

            ref = head;
            while (ref) {
                char inc = ref->ref >= 0 ? '+' : '-';
                ALOGD("\t%c ID %p (ref %d):", inc, ref->id, ref->ref);
                ref = ref->next;
            }

#if CALLSTACK_ENABLED
            CallStack::logStack(LOG_TAG);
#endif
        }
    }

    void renameRefsId(ref_entry* r, const void* old_id, const void* new_id)
    {
        if (mTrackEnabled) {
            AutoMutex _l(mMutex);
            ref_entry* ref = r;
            while (ref != NULL) {
                if (ref->id == old_id) {
                    ref->id = new_id;
                }
                ref = ref->next;
            }
        }
    }

    void printRefsLocked(String8* out, const ref_entry* refs) const
    {
        char buf[128];
        while (refs) {
            char inc = refs->ref >= 0 ? '+' : '-';
            snprintf(buf, sizeof(buf), "\t%c ID %p (ref %d):\n",
                     inc, refs->id, refs->ref);
            out->append(buf);
#if DEBUG_REFS_CALLSTACK_ENABLED && CALLSTACK_ENABLED
            out->append(CallStack::stackToString("\t\t", refs->stack.get()));
#else
            out->append("\t\t(call stacks disabled)");
#endif
            refs = refs->next;
        }
    }

    mutable Mutex mMutex;
    ref_entry* mStrongRefs;
    ref_entry* mWeakRefs;

    bool mTrackEnabled;
    // Collect stack traces on addref and removeref, instead of deleting the
    // stack references on removeref that match the address ones.
    bool mRetain;

#endif
};

// ---------------------------------------------------------------------------

void RefBase::incStrong(const void* id) const
{
    weakref_impl* const refs = mRefs;
    refs->incWeak(id);

    refs->addStrongRef(id);
    const int32_t c = refs->mStrong.fetch_add(1, std::memory_order_relaxed);
    ALOG_ASSERT(c > 0, "incStrong() called on %p after last strong ref", refs);
#if PRINT_REFS
    ALOGD("incStrong of %p from %p: cnt=%d\n", this, id, c);
#endif
    if (c != INITIAL_STRONG_VALUE) {
        return;
    }

    int32_t old __unused = refs->mStrong.fetch_sub(INITIAL_STRONG_VALUE, std::memory_order_relaxed);
    // A decStrong() must still happen after us.
    ALOG_ASSERT(old > INITIAL_STRONG_VALUE, "0x%x too small", old);
    refs->mBase->onFirstRef();
}

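// Illustrative trace of the bias fix-up above (ids are arbitrary caller
// tokens, typically the address of the sp<> holding the reference; MyObject
// is a hypothetical RefBase subclass, not part of this file):
//
//     MyObject* p = new MyObject();   // mStrong == INITIAL_STRONG_VALUE, mWeak == 0
//     p->incStrong(id1);              // fetch_add sees INITIAL_STRONG_VALUE, so this
//                                     // call subtracts the bias (mStrong == 1) and
//                                     // invokes onFirstRef() exactly once
//     p->incStrong(id2);              // mStrong == 2, no onFirstRef()
//     p->decStrong(id2);              // mStrong == 1
//     p->decStrong(id1);              // mStrong == 0: onLastStrongRef(), delete p
//
// Concurrent incStrong() calls can observe values above INITIAL_STRONG_VALUE;
// only the thread that saw exactly INITIAL_STRONG_VALUE performs the
// subtraction and the onFirstRef() callback.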

void RefBase::incStrongRequireStrong(const void* id) const {
    weakref_impl* const refs = mRefs;
    refs->incWeak(id);

    refs->addStrongRef(id);
    const int32_t c = refs->mStrong.fetch_add(1, std::memory_order_relaxed);

    LOG_ALWAYS_FATAL_IF(c <= 0 || c == INITIAL_STRONG_VALUE,
                        "incStrongRequireStrong() called on %p which isn't already owned", refs);
#if PRINT_REFS
    ALOGD("incStrong (requiring strong) of %p from %p: cnt=%d\n", this, id, c);
#endif
}

void RefBase::decStrong(const void* id) const
{
    weakref_impl* const refs = mRefs;
    refs->removeStrongRef(id);
    const int32_t c = refs->mStrong.fetch_sub(1, std::memory_order_release);
#if PRINT_REFS
    ALOGD("decStrong of %p from %p: cnt=%d\n", this, id, c);
#endif
    LOG_ALWAYS_FATAL_IF(BAD_STRONG(c), "decStrong() called on %p too many times",
                        refs);
    if (c == 1) {
        std::atomic_thread_fence(std::memory_order_acquire);
        refs->mBase->onLastStrongRef(id);
        int32_t flags = refs->mFlags.load(std::memory_order_relaxed);
        if ((flags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
            delete this;
            // The destructor does not delete refs in this case.
        }
    }
    // Note that even with only strong reference operations, the thread
    // deallocating this may not be the same as the thread deallocating refs.
    // That's OK: all accesses to this happen before its deletion here,
    // and all accesses to refs happen before its deletion in the final decWeak.
    // The destructor can safely access mRefs because either it's deleting
    // mRefs itself, or it's running entirely before the final mWeak decrement.
    //
    // Since we're doing atomic loads of `flags`, the static analyzer assumes
    // they can change between `delete this;` and `refs->decWeak(id);`. This is
    // not the case. The analyzer may become more okay with this pattern when
    // https://bugs.llvm.org/show_bug.cgi?id=34365 gets resolved. NOLINTNEXTLINE
    refs->decWeak(id);
}

void RefBase::forceIncStrong(const void* id) const
{
    // Allows initial mStrong of 0 in addition to INITIAL_STRONG_VALUE.
    // TODO: Better document assumptions.
    weakref_impl* const refs = mRefs;
    refs->incWeak(id);

    refs->addStrongRef(id);
    const int32_t c = refs->mStrong.fetch_add(1, std::memory_order_relaxed);
    ALOG_ASSERT(c >= 0, "forceIncStrong called on %p after ref count underflow",
                refs);
#if PRINT_REFS
    ALOGD("forceIncStrong of %p from %p: cnt=%d\n", this, id, c);
#endif

    switch (c) {
    case INITIAL_STRONG_VALUE:
        refs->mStrong.fetch_sub(INITIAL_STRONG_VALUE,
                                std::memory_order_relaxed);
        FALLTHROUGH_INTENDED;
    case 0:
        refs->mBase->onFirstRef();
    }
}

int32_t RefBase::getStrongCount() const
{
    // Debugging only; No memory ordering guarantees.
    return mRefs->mStrong.load(std::memory_order_relaxed);
}

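// Illustrative example of the OBJECT_LIFETIME_WEAK policy handled in
// decStrong()/decWeak() (MyCache is a hypothetical RefBase subclass, not part
// of this file, that calls extendObjectLifetime(OBJECT_LIFETIME_WEAK) in its
// constructor):
//
//     sp<MyCache> s(new MyCache());   // mStrong 1, mWeak 1
//     wp<MyCache> w = s;              // mWeak 2
//     s.clear();                      // mStrong 0: onLastStrongRef(), but the
//                                     // object is NOT destroyed; mWeak 1
//     sp<MyCache> s2 = w.promote();   // succeeds if onIncStrongAttempted()
//                                     // agrees: the object is revived
//     s2.clear();                     // mStrong 0 again, mWeak 1
//     w.clear();                      // last weak ref: onLastWeakRef(), then
//                                     // decWeak() deletes the object, whose
//                                     // destructor deletes the weakref_impl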

RefBase* RefBase::weakref_type::refBase() const
{
    return static_cast<const weakref_impl*>(this)->mBase;
}

void RefBase::weakref_type::incWeak(const void* id)
{
    weakref_impl* const impl = static_cast<weakref_impl*>(this);
    impl->addWeakRef(id);
    const int32_t c __unused = impl->mWeak.fetch_add(1,
            std::memory_order_relaxed);
    ALOG_ASSERT(c >= 0, "incWeak called on %p after last weak ref", this);
}

void RefBase::weakref_type::incWeakRequireWeak(const void* id)
{
    weakref_impl* const impl = static_cast<weakref_impl*>(this);
    impl->addWeakRef(id);
    const int32_t c __unused = impl->mWeak.fetch_add(1,
            std::memory_order_relaxed);
    LOG_ALWAYS_FATAL_IF(c <= 0, "incWeakRequireWeak called on %p which has no weak refs", this);
}

void RefBase::weakref_type::decWeak(const void* id)
{
    weakref_impl* const impl = static_cast<weakref_impl*>(this);
    impl->removeWeakRef(id);
    const int32_t c = impl->mWeak.fetch_sub(1, std::memory_order_release);
    LOG_ALWAYS_FATAL_IF(BAD_WEAK(c), "decWeak called on %p too many times",
                        this);
    if (c != 1) return;
    std::atomic_thread_fence(std::memory_order_acquire);

    int32_t flags = impl->mFlags.load(std::memory_order_relaxed);
    if ((flags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
        // This is the regular lifetime case. The object is destroyed
        // when the last strong reference goes away. Since weakref_impl
        // outlives the object, it is not destroyed in the dtor, and
        // we'll have to do it here.
        if (impl->mStrong.load(std::memory_order_relaxed)
                == INITIAL_STRONG_VALUE) {
            // Decrementing a weak count to zero when object never had a strong
            // reference. We assume it acquired a weak reference early, e.g.
            // in the constructor, and will eventually be properly destroyed,
            // usually via incrementing and decrementing the strong count.
            // Thus we no longer do anything here. We log this case, since it
            // seems to be extremely rare, and should not normally occur. We
            // used to deallocate mBase here, so this may now indicate a leak.
            ALOGW("RefBase: Object at %p lost last weak reference "
                  "before it had a strong reference", impl->mBase);
        } else {
            // ALOGV("Freeing refs %p of old RefBase %p\n", this, impl->mBase);
            delete impl;
        }
    } else {
        // This is the OBJECT_LIFETIME_WEAK case. The last weak-reference
        // is gone, we can destroy the object.
        impl->mBase->onLastWeakRef(id);
        delete impl->mBase;
    }
}

bool RefBase::weakref_type::attemptIncStrong(const void* id)
{
    incWeak(id);

    weakref_impl* const impl = static_cast<weakref_impl*>(this);
    int32_t curCount = impl->mStrong.load(std::memory_order_relaxed);

    ALOG_ASSERT(curCount >= 0,
                "attemptIncStrong called on %p after underflow", this);

    while (curCount > 0 && curCount != INITIAL_STRONG_VALUE) {
        // we're in the easy/common case of promoting a weak-reference
        // from an existing strong reference.
        if (impl->mStrong.compare_exchange_weak(curCount, curCount+1,
                                                std::memory_order_relaxed)) {
            break;
        }
        // the strong count has changed on us, we need to re-assert our
        // situation. curCount was updated by compare_exchange_weak.
    }

    if (curCount <= 0 || curCount == INITIAL_STRONG_VALUE) {
        // we're now in the harder case of either:
        // - there never was a strong reference on us
        // - or, all strong references have been released
        int32_t flags = impl->mFlags.load(std::memory_order_relaxed);
        if ((flags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
            // this object has a "normal" life-time, i.e.: it gets destroyed
            // when the last strong reference goes away
            if (curCount <= 0) {
                // the last strong-reference got released, the object cannot
                // be revived.
                decWeak(id);
                return false;
            }

            // here, curCount == INITIAL_STRONG_VALUE, which means
            // there never was a strong-reference, so we can try to
            // promote this object; we need to do that atomically.
            while (curCount > 0) {
                if (impl->mStrong.compare_exchange_weak(curCount, curCount+1,
                                                        std::memory_order_relaxed)) {
                    break;
                }
                // the strong count has changed on us, we need to re-assert our
                // situation (e.g.: another thread has inc/decStrong'ed us)
                // curCount has been updated.
            }

            if (curCount <= 0) {
                // promote() failed, some other thread destroyed us in the
                // meantime (i.e.: strong count reached zero).
                decWeak(id);
                return false;
            }
        } else {
            // this object has an "extended" life-time, i.e.: it can be
            // revived from a weak-reference only.
            // Ask the object's implementation if it agrees to be revived.
            if (!impl->mBase->onIncStrongAttempted(FIRST_INC_STRONG, id)) {
                // it didn't, so give up.
                decWeak(id);
                return false;
            }
            // grab a strong-reference, which is always safe due to the
            // extended life-time.
            curCount = impl->mStrong.fetch_add(1, std::memory_order_relaxed);
            // If the strong reference count has already been incremented by
            // someone else, the implementor of onIncStrongAttempted() is holding
            // an unneeded reference. So call onLastStrongRef() here to remove it.
            // (No, this is not pretty.) Note that we MUST NOT do this if we
            // are in fact acquiring the first reference.
            if (curCount != 0 && curCount != INITIAL_STRONG_VALUE) {
                impl->mBase->onLastStrongRef(id);
            }
        }
    }

    impl->addStrongRef(id);

#if PRINT_REFS
    ALOGD("attemptIncStrong of %p from %p: cnt=%d\n", this, id, curCount);
#endif

    // curCount is the value of mStrong before we incremented it.
    // Now we need to fix-up the count if it was INITIAL_STRONG_VALUE.
    // This must be done safely, i.e.: handle the case where several threads
    // were here in attemptIncStrong().
    // curCount > INITIAL_STRONG_VALUE is OK, and can happen if we're doing
    // this in the middle of another incStrong. The subtraction is handled
    // by the thread that started with INITIAL_STRONG_VALUE.
    if (curCount == INITIAL_STRONG_VALUE) {
        impl->mStrong.fetch_sub(INITIAL_STRONG_VALUE,
                                std::memory_order_relaxed);
    }

    return true;
}

bool RefBase::weakref_type::attemptIncWeak(const void* id)
{
    weakref_impl* const impl = static_cast<weakref_impl*>(this);

    int32_t curCount = impl->mWeak.load(std::memory_order_relaxed);
    ALOG_ASSERT(curCount >= 0, "attemptIncWeak called on %p after underflow",
                this);
    while (curCount > 0) {
        if (impl->mWeak.compare_exchange_weak(curCount, curCount+1,
                                              std::memory_order_relaxed)) {
            break;
        }
        // curCount has been updated.
    }

    if (curCount > 0) {
        impl->addWeakRef(id);
    }

    return curCount > 0;
}

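// Illustrative contrast between the two attempt operations above (MyObject is
// a hypothetical RefBase subclass, not part of this file, using the default
// lifetime policy):
//
//     MyObject* p = new MyObject();   // never had a strong reference
//     wp<MyObject> w = p;             // only a weak ref; mStrong == INITIAL_STRONG_VALUE
//     sp<MyObject> s = w.promote();   // attemptIncStrong() succeeds: an object
//                                     // that never had a strong reference can
//                                     // still be promoted
//
// attemptIncWeak(), on the other hand, succeeds only while mWeak is already
// nonzero, so it can fail in situations where attemptIncStrong() would have
// succeeded, as noted in the comment block at the top of this file.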

int32_t RefBase::weakref_type::getWeakCount() const
{
    // Debug only!
    return static_cast<const weakref_impl*>(this)->mWeak
            .load(std::memory_order_relaxed);
}

void RefBase::weakref_type::printRefs() const
{
    static_cast<const weakref_impl*>(this)->printRefs();
}

void RefBase::weakref_type::trackMe(bool enable, bool retain)
{
    static_cast<weakref_impl*>(this)->trackMe(enable, retain);
}

RefBase::weakref_type* RefBase::createWeak(const void* id) const
{
    mRefs->incWeak(id);
    return mRefs;
}

RefBase::weakref_type* RefBase::getWeakRefs() const
{
    return mRefs;
}

RefBase::RefBase()
    : mRefs(new weakref_impl(this))
{
}

RefBase::~RefBase()
{
    int32_t flags = mRefs->mFlags.load(std::memory_order_relaxed);
    // Life-time of this object is extended to WEAK, in
    // which case weakref_impl doesn't out-live the object and we
    // can free it now.
    if ((flags & OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_WEAK) {
        // It's possible that the weak count is not 0 if the object
        // re-acquired a weak reference in its destructor
        if (mRefs->mWeak.load(std::memory_order_relaxed) == 0) {
            delete mRefs;
        }
    } else if (mRefs->mStrong.load(std::memory_order_relaxed) == INITIAL_STRONG_VALUE) {
        // We never acquired a strong reference on this object.
#if DEBUG_REFBASE_DESTRUCTION
        // Treating this as fatal is prone to causing boot loops. For debugging, it's
        // better to treat as non-fatal.
        ALOGD("RefBase: Explicit destruction, weak count = %d (in %p)", mRefs->mWeak.load(), this);

#if CALLSTACK_ENABLED
        CallStack::logStack(LOG_TAG);
#endif
#else
        LOG_ALWAYS_FATAL("RefBase: Explicit destruction, weak count = %d", mRefs->mWeak.load());
#endif
    }
    // For debugging purposes, clear mRefs. Ineffective against outstanding wp's.
    const_cast<weakref_impl*&>(mRefs) = nullptr;
}

void RefBase::extendObjectLifetime(int32_t mode)
{
    // Must be happens-before ordered with respect to construction or any
    // operation that could destroy the object.
    mRefs->mFlags.fetch_or(mode, std::memory_order_relaxed);
}

void RefBase::onFirstRef()
{
}

void RefBase::onLastStrongRef(const void* /*id*/)
{
}

bool RefBase::onIncStrongAttempted(uint32_t flags, const void* /*id*/)
{
    return (flags&FIRST_INC_STRONG) ? true : false;
}

void RefBase::onLastWeakRef(const void* /*id*/)
{
}

// ---------------------------------------------------------------------------

#if DEBUG_REFS
void RefBase::renameRefs(size_t n, const ReferenceRenamer& renamer) {
    for (size_t i=0 ; i<n ; i++) {
        renamer(i);
    }
}
#else
void RefBase::renameRefs(size_t /*n*/, const ReferenceRenamer& /*renamer*/) { }
#endif

void RefBase::renameRefId(weakref_type* ref,
        const void* old_id, const void* new_id) {
    weakref_impl* const impl = static_cast<weakref_impl*>(ref);
    impl->renameStrongRefId(old_id, new_id);
    impl->renameWeakRefId(old_id, new_id);
}

void RefBase::renameRefId(RefBase* ref,
        const void* old_id, const void* new_id) {
    ref->mRefs->renameStrongRefId(old_id, new_id);
    ref->mRefs->renameWeakRefId(old_id, new_id);
}

} // namespace android