/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "allocation_record.h"

#include "art_method-inl.h"
#include "base/enums.h"
#include "base/logging.h"  // For VLOG
#include "base/stl_util.h"
#include "obj_ptr-inl.h"
#include "object_callbacks.h"
#include "stack.h"

#ifdef ART_TARGET_ANDROID
#include "cutils/properties.h"
#endif

namespace art {
namespace gc {

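// Translate the recorded dex PC into a source line number via the method's
// debug info.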
int32_t AllocRecordStackTraceElement::ComputeLineNumber() const {
  DCHECK(method_ != nullptr);
  return method_->GetLineNumFromDexPC(dex_pc_);
}

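// Returns the allocated object's class descriptor, using *storage as backing
// storage, or "null" if the class root has been cleared.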
const char* AllocRecord::GetClassDescriptor(std::string* storage) const {
  // klass_ could contain null only if we implement class unloading.
  return klass_.IsNull() ? "null" : klass_.Read()->GetDescriptor(storage);
}

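// On Android targets, let system properties override the default limits:
// dalvik.vm.allocTrackerMax caps the total number of records,
// dalvik.vm.recentAllocMax caps the number of recent records (and must not
// exceed the total), and debug.allocTracker.stackDepth caps the recorded stack
// depth (bounded by kMaxSupportedStackDepth). No-op on host builds.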
void AllocRecordObjectMap::SetProperties() {
#ifdef ART_TARGET_ANDROID
  // Check whether there's a system property overriding the max number of records.
  const char* propertyName = "dalvik.vm.allocTrackerMax";
  char allocMaxString[PROPERTY_VALUE_MAX];
  if (property_get(propertyName, allocMaxString, "") > 0) {
    char* end;
    size_t value = strtoul(allocMaxString, &end, 10);
    if (*end != '\0') {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << allocMaxString
                 << "' --- invalid";
    } else {
      alloc_record_max_ = value;
      if (recent_record_max_ > value) {
        recent_record_max_ = value;
      }
    }
  }
  // Check whether there's a system property overriding the number of recent records.
  propertyName = "dalvik.vm.recentAllocMax";
  char recentAllocMaxString[PROPERTY_VALUE_MAX];
  if (property_get(propertyName, recentAllocMaxString, "") > 0) {
    char* end;
    size_t value = strtoul(recentAllocMaxString, &end, 10);
    if (*end != '\0') {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << recentAllocMaxString
                 << "' --- invalid";
    } else if (value > alloc_record_max_) {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << recentAllocMaxString
                 << "' --- should be less than " << alloc_record_max_;
    } else {
      recent_record_max_ = value;
    }
  }
  // Check whether there's a system property overriding the max depth of stack trace.
  propertyName = "debug.allocTracker.stackDepth";
  char stackDepthString[PROPERTY_VALUE_MAX];
  if (property_get(propertyName, stackDepthString, "") > 0) {
    char* end;
    size_t value = strtoul(stackDepthString, &end, 10);
    if (*end != '\0') {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << stackDepthString
                 << "' --- invalid";
    } else if (value > kMaxSupportedStackDepth) {
      LOG(WARNING) << propertyName << " '" << stackDepthString << "' too large, using "
                   << kMaxSupportedStackDepth;
      max_stack_depth_ = kMaxSupportedStackDepth;
    } else {
      max_stack_depth_ = value;
    }
  }
#endif  // ART_TARGET_ANDROID
}

AllocRecordObjectMap::~AllocRecordObjectMap() {
  Clear();
}

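// Visit roots that allocation tracking must keep alive: the classes of the
// most recent recent_record_max_ records are marked as strong roots, and every
// ArtMethod in any recorded stack trace is visited so it cannot be unloaded.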
void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) {
  CHECK_LE(recent_record_max_, alloc_record_max_);
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(visitor, RootInfo(kRootDebugger));
  size_t count = recent_record_max_;
  // Only visit the last recent_record_max_ number of allocation records in entries_ and mark the
  // klass_ fields as strong roots.
  for (auto it = entries_.rbegin(), end = entries_.rend(); it != end; ++it) {
    AllocRecord& record = it->second;
    if (count > 0) {
      buffered_visitor.VisitRootIfNonNull(record.GetClassGcRoot());
      --count;
    }
    // Visit all of the stack frames to make sure no methods in the stack traces get unloaded by
    // class unloading.
    for (size_t i = 0, depth = record.GetDepth(); i < depth; ++i) {
      const AllocRecordStackTraceElement& element = record.StackElement(i);
      DCHECK(element.GetMethod() != nullptr);
      element.GetMethod()->VisitRoots(buffered_visitor, kRuntimePointerSize);
    }
  }
}

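// Update a record's class GC root to the class's new address if the GC moved
// it. Called with the alloc tracker lock held during sweeping.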
static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor)
    REQUIRES_SHARED(Locks::mutator_lock_)
    REQUIRES(Locks::alloc_tracker_lock_) {
  GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
  // This does not need a read barrier because this is called by GC.
  mirror::Object* old_object = klass.Read<kWithoutReadBarrier>();
  if (old_object != nullptr) {
    // The class object can become null if we implement class unloading.
    // In that case we might still want to keep the class name string (not implemented).
    mirror::Object* new_object = visitor->IsMarked(old_object);
    DCHECK(new_object != nullptr);
    if (UNLIKELY(old_object != new_object)) {
      klass = GcRoot<mirror::Class>(new_object->AsClass());
    }
  }
}

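// Called by the GC after marking. Records whose objects died are erased,
// except for the most recent recent_record_max_ entries, which keep their
// records but have their object roots cleared; surviving objects that moved
// get their roots updated.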
void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedVisitor* visitor) {
  VLOG(heap) << "Start SweepAllocationRecords()";
  size_t count_deleted = 0, count_moved = 0, count = 0;
  // Only the first (size - recent_record_max_) records can be deleted.
  const size_t delete_bound = std::max(entries_.size(), recent_record_max_) - recent_record_max_;
  for (auto it = entries_.begin(), end = entries_.end(); it != end;) {
    ++count;
    // This does not need a read barrier because this is called by GC.
    mirror::Object* old_object = it->first.Read<kWithoutReadBarrier>();
    AllocRecord& record = it->second;
    mirror::Object* new_object = old_object == nullptr ? nullptr : visitor->IsMarked(old_object);
    if (new_object == nullptr) {
      if (count > delete_bound) {
        it->first = GcRoot<mirror::Object>(nullptr);
        SweepClassObject(&record, visitor);
        ++it;
      } else {
        it = entries_.erase(it);
        ++count_deleted;
      }
    } else {
      if (old_object != new_object) {
        it->first = GcRoot<mirror::Object>(new_object);
        ++count_moved;
      }
      SweepClassObject(&record, visitor);
      ++it;
    }
  }
  VLOG(heap) << "Deleted " << count_deleted << " allocation records";
  VLOG(heap) << "Updated " << count_moved << " allocation records";
}

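// Re-enable recording once the GC's sweeping completes and wake any threads
// blocked in RecordAllocation(). Only used without read barriers; with read
// barriers, weak ref access gates recording instead.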
void AllocRecordObjectMap::AllowNewAllocationRecords() {
  CHECK(!kUseReadBarrier);
  allow_new_record_ = true;
  new_record_condition_.Broadcast(Thread::Current());
}

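// Block new records, e.g. while the GC sweeps system weaks; threads attempting
// to record will wait until AllowNewAllocationRecords() is called.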
void AllocRecordObjectMap::DisallowNewAllocationRecords() {
  CHECK(!kUseReadBarrier);
  allow_new_record_ = false;
}

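// Wake up any threads blocked in RecordAllocation() without changing
// allow_new_record_ (used with read barriers, where weak ref access gates
// recording).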
void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
  new_record_condition_.Broadcast(Thread::Current());
}

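// Stack visitor that captures up to max_depth (method, dex pc) pairs from the
// allocating thread's stack, skipping runtime methods and mapping proxy
// methods to their interface methods.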
class AllocRecordStackVisitor : public StackVisitor {
 public:
  AllocRecordStackVisitor(Thread* thread, size_t max_depth, AllocRecordStackTrace* trace_out)
      REQUIRES_SHARED(Locks::mutator_lock_)
      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        max_depth_(max_depth),
        trace_(trace_out) {}

  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
  // annotalysis.
  bool VisitFrame() OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    if (trace_->GetDepth() >= max_depth_) {
      return false;
    }
    ArtMethod* m = GetMethod();
    // m may be null if we have inlined methods of unresolved classes. b/27858645
    if (m != nullptr && !m->IsRuntimeMethod()) {
      m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
      trace_->AddStackElement(AllocRecordStackTraceElement(m, GetDexPc()));
    }
    return true;
  }

 private:
  const size_t max_depth_;
  AllocRecordStackTrace* const trace_;
};

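// Globally enable or disable allocation tracking. Enabling allocates the map
// on first use, applies the property overrides, and instruments the quick
// allocation entry points; disabling clears all records and uninstruments
// them.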
void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
  Thread* self = Thread::Current();
  Heap* heap = Runtime::Current()->GetHeap();
  if (enable) {
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (heap->IsAllocTrackingEnabled()) {
        return;  // Already enabled, bail.
      }
      AllocRecordObjectMap* records = heap->GetAllocationRecords();
      if (records == nullptr) {
        records = new AllocRecordObjectMap;
        heap->SetAllocationRecords(records);
      }
      CHECK(records != nullptr);
      records->SetProperties();
      std::string self_name;
      self->GetThreadName(self_name);
      if (self_name == "JDWP") {
        records->alloc_ddm_thread_id_ = self->GetTid();
      }
      size_t sz = sizeof(AllocRecordStackTraceElement) * records->max_stack_depth_ +
                  sizeof(AllocRecord) + sizeof(AllocRecordStackTrace);
      LOG(INFO) << "Enabling alloc tracker (" << records->alloc_record_max_ << " entries of "
                << records->max_stack_depth_ << " frames, taking up to "
                << PrettySize(sz * records->alloc_record_max_) << ")";
    }
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      heap->SetAllocTrackingEnabled(true);
    }
  } else {
    // Delete outside of the critical section to avoid possible lock violations like the runtime
    // shutdown lock.
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (!heap->IsAllocTrackingEnabled()) {
        return;  // Already disabled, bail.
      }
      heap->SetAllocTrackingEnabled(false);
      LOG(INFO) << "Disabling alloc tracker";
      AllocRecordObjectMap* records = heap->GetAllocationRecords();
      records->Clear();
    }
    // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
  }
}

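// Record a single allocation. The stack walk happens before taking
// alloc_tracker_lock_ since walking may itself allocate (b/27858645); the
// record is then inserted keyed by the allocated object, after waiting out any
// GC-imposed pause on new records.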
void AllocRecordObjectMap::RecordAllocation(Thread* self,
                                            ObjPtr<mirror::Object>* obj,
                                            size_t byte_count) {
  // Get stack trace outside of lock in case there are allocations during the stack walk.
  // b/27858645.
  AllocRecordStackTrace trace;
  AllocRecordStackVisitor visitor(self, max_stack_depth_, /*out*/ &trace);
  {
    StackHandleScope<1> hs(self);
    auto obj_wrapper = hs.NewHandleWrapper(obj);
    visitor.WalkStack();
  }

  MutexLock mu(self, *Locks::alloc_tracker_lock_);
  Heap* const heap = Runtime::Current()->GetHeap();
  if (!heap->IsAllocTrackingEnabled()) {
    // In the process of shutting down recording, bail.
    return;
  }

  // Do not record for DDM thread.
  if (alloc_ddm_thread_id_ == self->GetTid()) {
    return;
  }

  // Wait for GC's sweeping to complete and allow new records.
  while (UNLIKELY((!kUseReadBarrier && !allow_new_record_) ||
                  (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::alloc_tracker_lock_);
    new_record_condition_.WaitHoldingLocks(self);
  }

  if (!heap->IsAllocTrackingEnabled()) {
    // Return if allocation tracking has been disabled while waiting for system weak access
    // above.
    return;
  }

  DCHECK_LE(Size(), alloc_record_max_);

  // Tag the trace with the allocating thread's id.
  trace.SetTid(self->GetTid());

  // Add the record.
  Put(obj->Ptr(), AllocRecord(byte_count, (*obj)->GetClass(), std::move(trace)));
  DCHECK_LE(Size(), alloc_record_max_);
}

void AllocRecordObjectMap::Clear() {
  entries_.clear();
}

AllocRecordObjectMap::AllocRecordObjectMap()
    : new_record_condition_("New allocation record condition", *Locks::alloc_tracker_lock_) {}

}  // namespace gc
}  // namespace art