1 // Copyright 2018 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef BASE_TASK_SCHEDULER_TRACKED_REF_H_
6 #define BASE_TASK_SCHEDULER_TRACKED_REF_H_
7 
8 #include <memory>
9 
10 #include "base/atomic_ref_count.h"
11 #include "base/gtest_prod_util.h"
12 #include "base/logging.h"
13 #include "base/macros.h"
14 #include "base/memory/ptr_util.h"
15 #include "base/synchronization/waitable_event.h"
16 
17 namespace base {
18 namespace internal {
19 
20 // TrackedRefs are effectively a ref-counting scheme for objects that have a
21 // single owner.
22 //
// Deletion is still controlled by the single owner but ~T() itself will block
// until all the TrackedRefs handed out by its TrackedRefFactory have been
// released (by ~TrackedRef<T>()).
26 //
27 // Just like WeakPtrFactory: TrackedRefFactory<T> should be the last member of T
28 // to ensure ~TrackedRefFactory<T>() runs first in ~T().
29 //
30 // The owner of a T should hence be certain that the last TrackedRefs to T are
31 // already gone or on their way out before destroying it or ~T() will hang
32 // (indicating a bug in the tear down logic -- proper refcounting on the other
33 // hand would result in a leak).
34 //
35 // TrackedRefFactory only makes sense to use on types that are always leaked in
36 // production but need to be torn down in tests (blocking destruction is
37 // impractical in production -- ref. ScopedAllowBaseSyncPrimitivesForTesting
38 // below).
39 //
40 // Why would we ever need such a thing? In task_scheduler there is a clear
41 // ownership hierarchy with mostly single owners and little refcounting. In
42 // production nothing is ever torn down so this isn't a problem. In tests
43 // however we must JoinForTesting(). At that point, all the raw back T* refs
44 // used by the worker threads are problematic because they can result in use-
45 // after-frees if a worker outlives the deletion of its corresponding
46 // TaskScheduler/TaskTracker/SchedulerWorkerPool/etc.
47 //
48 // JoinForTesting() isn't so hard when all workers are managed. But with cleanup
49 // semantics (reclaiming a worker who's been idle for too long) it becomes
// tricky because workers can go unaccounted for before they exit their main
// function (https://crbug.com/827615).
52 //
53 // For that reason and to clearly document the ownership model, task_scheduler
54 // uses TrackedRefs.
55 //
56 // On top of being a clearer ownership model than proper refcounting, a hang in
57 // tear down in a test with out-of-order tear down logic is much preferred to
58 // letting its worker thread and associated constructs outlive the test
59 // (potentially resulting in flakes in unrelated tests running later in the same
60 // process).
61 //
62 // Note: While there's nothing task_scheduler specific about TrackedRefs it
63 // requires an ownership model where all the TrackedRefs are released on other
64 // threads in sync with ~T(). This isn't a typical use case beyond shutting down
65 // TaskScheduler in tests and as such this is kept internal here for now.
66 
67 template <class T>
68 class TrackedRefFactory;
69 
70 // TrackedRef<T> can be used like a T*.
71 template <class T>
72 class TrackedRef {
73  public:
74   // Moveable and copyable.
TrackedRef(TrackedRef<T> && other)75   TrackedRef(TrackedRef<T>&& other)
76       : ptr_(other.ptr_), factory_(other.factory_) {
77     // Null out |other_|'s factory so its destructor doesn't decrement
78     // |live_tracked_refs_|.
79     other.factory_ = nullptr;
80   }
TrackedRef(const TrackedRef<T> & other)81   TrackedRef(const TrackedRef<T>& other)
82       : ptr_(other.ptr_), factory_(other.factory_) {
83     factory_->live_tracked_refs_.Increment();
84   }
85 
86   // Intentionally not assignable for now because it makes the logic slightly
87   // convoluted and it's not a use case that makes sense for the types using
88   // this at the moment.
89   TrackedRef& operator=(TrackedRef<T>&& other) = delete;
90   TrackedRef& operator=(const TrackedRef<T>& other) = delete;
91 
~TrackedRef()92   ~TrackedRef() {
93     if (factory_ && !factory_->live_tracked_refs_.Decrement()) {
94       DCHECK(factory_->ready_to_destroy_);
95       DCHECK(!factory_->ready_to_destroy_->IsSignaled());
96       factory_->ready_to_destroy_->Signal();
97     }
98   }
99 
100   T& operator*() const { return *ptr_; }
101 
102   T* operator->() const { return ptr_; }
103 
104   explicit operator bool() const { return ptr_ != nullptr; }
105 
106  private:
107   friend class TrackedRefFactory<T>;
108 
TrackedRef(T * ptr,TrackedRefFactory<T> * factory)109   TrackedRef(T* ptr, TrackedRefFactory<T>* factory)
110       : ptr_(ptr), factory_(factory) {
111     factory_->live_tracked_refs_.Increment();
112   }
113 
114   T* ptr_;
115   TrackedRefFactory<T>* factory_;
116 };
117 
118 // TrackedRefFactory<T> should be the last member of T.
119 template <class T>
120 class TrackedRefFactory {
121  public:
TrackedRefFactory(T * ptr)122   TrackedRefFactory(T* ptr)
123       : ptr_(ptr), self_ref_(WrapUnique(new TrackedRef<T>(ptr_, this))) {
124     DCHECK(ptr_);
125   }
126 
~TrackedRefFactory()127   ~TrackedRefFactory() {
128     // Enter the destruction phase.
129     ready_to_destroy_ = std::make_unique<WaitableEvent>();
130 
131     // Release self-ref (if this was the last one it will signal the event right
132     // away).
133     self_ref_.reset();
134 
135     ready_to_destroy_->Wait();
136   }
137 
GetTrackedRef()138   TrackedRef<T> GetTrackedRef() {
139     // TrackedRefs cannot be obtained after |live_tracked_refs_| has already
140     // reached zero. In other words, the owner of a TrackedRefFactory shouldn't
141     // vend new TrackedRefs while it's being destroyed (owners of TrackedRefs
142     // may still copy/move their refs around during the destruction phase).
143     DCHECK(!live_tracked_refs_.IsZero());
144     return TrackedRef<T>(ptr_, this);
145   }
146 
147  private:
148   friend class TrackedRef<T>;
149   FRIEND_TEST_ALL_PREFIXES(TrackedRefTest, CopyAndMoveSemantics);
150 
151   T* const ptr_;
152 
153   // The number of live TrackedRefs vended by this factory.
154   AtomicRefCount live_tracked_refs_{0};
155 
156   // Non-null during the destruction phase. Signaled once |live_tracked_refs_|
157   // reaches 0. Note: while this could a direct member, only initializing it in
158   // the destruction phase avoids keeping a handle open for the entire session.
159   std::unique_ptr<WaitableEvent> ready_to_destroy_;
160 
161   // TrackedRefFactory holds a TrackedRef as well to prevent
162   // |live_tracked_refs_| from ever reaching zero before ~TrackedRefFactory().
163   std::unique_ptr<TrackedRef<T>> self_ref_;
164 
165   DISALLOW_COPY_AND_ASSIGN(TrackedRefFactory);
166 };
167 
168 }  // namespace internal
169 }  // namespace base
170 
171 #endif  // BASE_TASK_SCHEDULER_TRACKED_REF_H_
172