/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ALLOCATION_RECORD_H_
#define ART_RUNTIME_GC_ALLOCATION_RECORD_H_

#include <list>
#include <memory>
#include <vector>

#include "base/macros.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "obj_ptr.h"

namespace art HIDDEN {

class ArtMethod;
class IsMarkedVisitor;
class Thread;

namespace mirror {
class Class;
class Object;
}  // namespace mirror

namespace gc {

class AllocRecordStackTraceElement {
 public:
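  // Decodes the source line number for this frame from the recorded method and dex pc; requires
  // the mutator lock since it reads the method's debug info.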
  int32_t ComputeLineNumber() const REQUIRES_SHARED(Locks::mutator_lock_);

  AllocRecordStackTraceElement() = default;
  AllocRecordStackTraceElement(ArtMethod* method, uint32_t dex_pc)
      : method_(method),
        dex_pc_(dex_pc) {}

  ArtMethod* GetMethod() const {
    return method_;
  }

  void SetMethod(ArtMethod* m) {
    method_ = m;
  }

  uint32_t GetDexPc() const {
    return dex_pc_;
  }

  void SetDexPc(uint32_t pc) {
    dex_pc_ = pc;
  }

  bool operator==(const AllocRecordStackTraceElement& other) const {
    return method_ == other.method_ && dex_pc_ == other.dex_pc_;
  }

 private:
  ArtMethod* method_ = nullptr;
  uint32_t dex_pc_ = 0;
};

class AllocRecordStackTrace {
 public:
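  // Multiplier used to combine fields into a hash code; shared with HashAllocRecordTypes below.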
  static constexpr size_t kHashMultiplier = 17;

  AllocRecordStackTrace() = default;

  AllocRecordStackTrace(AllocRecordStackTrace&& r) noexcept
      : tid_(r.tid_),
        stack_(std::move(r.stack_)) {}

  AllocRecordStackTrace(const AllocRecordStackTrace& r)
      : tid_(r.tid_),
        stack_(r.stack_) {}

  pid_t GetTid() const {
    return tid_;
  }

  void SetTid(pid_t t) {
    tid_ = t;
  }

  size_t GetDepth() const {
    return stack_.size();
  }

  const AllocRecordStackTraceElement& GetStackElement(size_t index) const {
    DCHECK_LT(index, GetDepth());
    return stack_[index];
  }

  void AddStackElement(const AllocRecordStackTraceElement& element) {
    stack_.push_back(element);
  }

  void SetStackElementAt(size_t index, ArtMethod* m, uint32_t dex_pc) {
    DCHECK_LT(index, stack_.size());
    stack_[index].SetMethod(m);
    stack_[index].SetDexPc(dex_pc);
  }

  bool operator==(const AllocRecordStackTrace& other) const {
    if (this == &other) return true;
    return tid_ == other.tid_ && stack_ == other.stack_;
  }

 private:
  pid_t tid_ = 0;
  std::vector<AllocRecordStackTraceElement> stack_;
};

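// Hash functor for allocation stack traces and their elements; combines the tid, depth, and each
// frame's method and dex pc using kHashMultiplier.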
struct HashAllocRecordTypes {
  size_t operator()(const AllocRecordStackTraceElement& r) const {
    return std::hash<void*>()(reinterpret_cast<void*>(r.GetMethod())) *
        AllocRecordStackTrace::kHashMultiplier + std::hash<uint32_t>()(r.GetDexPc());
  }

  size_t operator()(const AllocRecordStackTrace& r) const {
    size_t depth = r.GetDepth();
    size_t result = r.GetTid() * AllocRecordStackTrace::kHashMultiplier + depth;
    for (size_t i = 0; i < depth; ++i) {
      result = result * AllocRecordStackTrace::kHashMultiplier + (*this)(r.GetStackElement(i));
    }
    return result;
  }
};

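// Pointer adapters for the functor above: they hash and compare through the pointee, with nullptr
// hashing to 0 and comparing equal only to nullptr.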
template <typename T> struct HashAllocRecordTypesPtr {
  size_t operator()(const T* r) const {
    if (r == nullptr) return 0;
    return HashAllocRecordTypes()(*r);
  }
};

template <typename T> struct EqAllocRecordTypesPtr {
  bool operator()(const T* r1, const T* r2) const {
    if (r1 == r2) return true;
    if (r1 == nullptr || r2 == nullptr) return false;
    return *r1 == *r2;
  }
};

class AllocRecord {
 public:
  // All instances of AllocRecord should be managed by an instance of AllocRecordObjectMap.
  AllocRecord(size_t count, mirror::Class* klass, AllocRecordStackTrace&& trace)
      : byte_count_(count), klass_(klass), trace_(std::move(trace)) {}

  size_t GetDepth() const {
    return trace_.GetDepth();
  }

  const AllocRecordStackTrace* GetStackTrace() const {
    return &trace_;
  }

  size_t ByteCount() const {
    return byte_count_;
  }

  pid_t GetTid() const {
    return trace_.GetTid();
  }

  mirror::Class* GetClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return klass_.Read();
  }

  const char* GetClassDescriptor(std::string* storage) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  GcRoot<mirror::Class>& GetClassGcRoot() REQUIRES_SHARED(Locks::mutator_lock_) {
    return klass_;
  }

  const AllocRecordStackTraceElement& StackElement(size_t index) const {
    return trace_.GetStackElement(index);
  }

 private:
  const size_t byte_count_;
  // klass_ may be a strong or a weak root for the GC; see the comment on
  // AllocRecordObjectMap::EntryList below.
  GcRoot<mirror::Class> klass_;
  // TODO: Share between alloc records with identical stack traces.
  AllocRecordStackTrace trace_;
};

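// Map from allocated objects to their allocation records. A reader iterates while holding both
// the mutator lock (shared) and alloc_tracker_lock_; a minimal sketch, assuming the map is
// obtained from Heap::GetAllocationRecords():
//
//   Thread* self = Thread::Current();
//   MutexLock mu(self, *Locks::alloc_tracker_lock_);
//   AllocRecordObjectMap* map = heap->GetAllocationRecords();
//   if (map != nullptr) {
//     for (auto it = map->Begin(), end = map->End(); it != end; ++it) {
//       const AllocRecord& record = it->second;
//       // ... inspect record.ByteCount(), record.GetTid(), record.GetStackTrace() ...
//     }
//   }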
class AllocRecordObjectMap {
 public:
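  // Default capacity of the record list, default number of entries reported to DDMS as "recent"
  // allocations, and the default and maximum supported recorded stack depths.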
  static constexpr size_t kDefaultNumAllocRecords = 512 * 1024;
  static constexpr size_t kDefaultNumRecentRecords = 64 * 1024 - 1;
  static constexpr size_t kDefaultAllocStackDepth = 16;
  static constexpr size_t kMaxSupportedStackDepth = 128;

  // GcRoot<mirror::Object> pointers in the list are weak roots, and the last recent_record_max_
  // number of AllocRecord::klass_ pointers are strong roots (the rest of the klass_ pointers are
  // weak roots). The last recent_record_max_ pairs in the list are always kept for DDMS's recent
  // allocation tracking, but the GcRoot<mirror::Object> pointers in these pairs can become null.
  // Both types of pointers need read barriers; do not access them directly.
  using EntryPair = std::pair<GcRoot<mirror::Object>, AllocRecord>;
  using EntryList = std::list<EntryPair>;

  // The caller must check that tracking is enabled before calling, since we read the stack trace
  // before checking the enabled flag.
  EXPORT void RecordAllocation(Thread* self, ObjPtr<mirror::Object>* obj, size_t byte_count)
      REQUIRES(!Locks::alloc_tracker_lock_) REQUIRES_SHARED(Locks::mutator_lock_);

  static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_);

  AllocRecordObjectMap() REQUIRES(Locks::alloc_tracker_lock_);
  ~AllocRecordObjectMap();

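  // Adds a record for obj, evicting the oldest entry once alloc_record_max_ entries are present,
  // so the list behaves as a bounded FIFO of the most recent allocations.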
  void Put(mirror::Object* obj, AllocRecord&& record)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    if (entries_.size() == alloc_record_max_) {
      entries_.pop_front();
    }
    entries_.push_back(EntryPair(GcRoot<mirror::Object>(obj), std::move(record)));
  }

  size_t Size() const REQUIRES_SHARED(Locks::alloc_tracker_lock_) {
    return entries_.size();
  }

  size_t GetRecentAllocationSize() const REQUIRES_SHARED(Locks::alloc_tracker_lock_) {
    CHECK_LE(recent_record_max_, alloc_record_max_);
    size_t sz = entries_.size();
    return std::min(recent_record_max_, sz);
  }

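  // Visits the strong roots, i.e. the klass_ fields of the most recent recent_record_max_ records
  // (see the comment on EntryList above).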
  void VisitRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);

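  // Sweeps the weak roots after a GC: entries whose object is no longer marked have their
  // GcRoot<mirror::Object> cleared, and pointers to moved objects are updated.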
  void SweepAllocationRecords(IsMarkedVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);

  // Allocation tracking could be enabled by the user in between DisallowNewAllocationRecords() and
  // AllowNewAllocationRecords(), in which case new allocation records can be added even though they
  // should be disallowed. However, this is GC-safe because new objects are not processed in this GC
  // cycle. The only downside of not handling this case is that such new allocation records can be
  // swept from the list; missing the first few records is acceptable when tracking is enabled via
  // the DDMS button.
  void DisallowNewAllocationRecords()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);
  void AllowNewAllocationRecords()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);
  void BroadcastForNewAllocationRecords()
      REQUIRES(Locks::alloc_tracker_lock_);

  // TODO: Is there a better way to hide the type of entries_?
  EntryList::iterator Begin()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.begin();
  }

  EntryList::iterator End()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.end();
  }

  EntryList::reverse_iterator RBegin()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.rbegin();
  }

  EntryList::reverse_iterator REnd()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.rend();
  }

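  // Removes all recorded entries.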
  void Clear() REQUIRES(Locks::alloc_tracker_lock_);

 private:
  size_t alloc_record_max_ GUARDED_BY(Locks::alloc_tracker_lock_) = kDefaultNumAllocRecords;
  size_t recent_record_max_ GUARDED_BY(Locks::alloc_tracker_lock_) = kDefaultNumRecentRecords;
  size_t max_stack_depth_ = kDefaultAllocStackDepth;
  bool allow_new_record_ GUARDED_BY(Locks::alloc_tracker_lock_) = true;
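  // Signaled when new allocation records become allowed again (see
  // BroadcastForNewAllocationRecords()).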
  ConditionVariable new_record_condition_ GUARDED_BY(Locks::alloc_tracker_lock_);
  // See the comment above on the EntryList alias.
  EntryList entries_ GUARDED_BY(Locks::alloc_tracker_lock_);

  void SetMaxStackDepth(size_t max_stack_depth) REQUIRES(Locks::alloc_tracker_lock_);
};

}  // namespace gc
}  // namespace art
#endif  // ART_RUNTIME_GC_ALLOCATION_RECORD_H_
311