/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "allocation_record.h"

#include "art_method-inl.h"
#include "base/enums.h"
#include "base/stl_util.h"
#include "obj_ptr-inl.h"
#include "stack.h"

#ifdef ART_TARGET_ANDROID
#include "cutils/properties.h"
#endif

namespace art {
namespace gc {
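// Map this frame's dex pc back to a source line number via the method's debug info.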
int32_t AllocRecordStackTraceElement::ComputeLineNumber() const {
  DCHECK(method_ != nullptr);
  return method_->GetLineNumFromDexPC(dex_pc_);
}
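// Return the descriptor of the allocated object's class, e.g. "Ljava/lang/String;",
// using *storage as backing storage when needed.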
const char* AllocRecord::GetClassDescriptor(std::string* storage) const {
  // klass_ could contain null only if we implement class unloading.
  return klass_.IsNull() ? "null" : klass_.Read()->GetDescriptor(storage);
}
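// Read the tracker's limits from system properties (Android targets only):
//   dalvik.vm.allocTrackerMax     - max number of allocation records kept
//   dalvik.vm.recentAllocMax      - number of most recent records retained across
//                                   GC sweeps (must not exceed allocTrackerMax)
//   debug.allocTracker.stackDepth - max depth of each recorded stack trace
// For example (illustrative values, set before the runtime starts):
//   adb shell setprop dalvik.vm.allocTrackerMax 65536
//   adb shell setprop debug.allocTracker.stackDepth 16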
void AllocRecordObjectMap::SetProperties() {
#ifdef ART_TARGET_ANDROID
  // Check whether there's a system property overriding the max number of records.
  const char* propertyName = "dalvik.vm.allocTrackerMax";
  char allocMaxString[PROPERTY_VALUE_MAX];
  if (property_get(propertyName, allocMaxString, "") > 0) {
    char* end;
    size_t value = strtoul(allocMaxString, &end, 10);
    if (*end != '\0') {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << allocMaxString
                 << "' --- invalid";
    } else {
      alloc_record_max_ = value;
      if (recent_record_max_ > value) {
        recent_record_max_ = value;
      }
    }
  }
  // Check whether there's a system property overriding the number of recent records.
  propertyName = "dalvik.vm.recentAllocMax";
  char recentAllocMaxString[PROPERTY_VALUE_MAX];
  if (property_get(propertyName, recentAllocMaxString, "") > 0) {
    char* end;
    size_t value = strtoul(recentAllocMaxString, &end, 10);
    if (*end != '\0') {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << recentAllocMaxString
                 << "' --- invalid";
    } else if (value > alloc_record_max_) {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << recentAllocMaxString
                 << "' --- should be no larger than " << alloc_record_max_;
    } else {
      recent_record_max_ = value;
    }
  }
  // Check whether there's a system property overriding the max depth of stack trace.
  propertyName = "debug.allocTracker.stackDepth";
  char stackDepthString[PROPERTY_VALUE_MAX];
  if (property_get(propertyName, stackDepthString, "") > 0) {
    char* end;
    size_t value = strtoul(stackDepthString, &end, 10);
    if (*end != '\0') {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << stackDepthString
                 << "' --- invalid";
    } else if (value > kMaxSupportedStackDepth) {
      LOG(WARNING) << propertyName << " '" << stackDepthString << "' too large, using "
                   << kMaxSupportedStackDepth;
      max_stack_depth_ = kMaxSupportedStackDepth;
    } else {
      max_stack_depth_ = value;
    }
  }
#endif  // ART_TARGET_ANDROID
}
AllocRecordObjectMap::~AllocRecordObjectMap() {
  Clear();
}
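// Report GC roots held by the tracker: the classes of the last recent_record_max_
// records are visited as strong roots, and every method in every recorded stack
// trace is visited so class unloading cannot free it.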
void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) {
  CHECK_LE(recent_record_max_, alloc_record_max_);
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(visitor, RootInfo(kRootDebugger));
  size_t count = recent_record_max_;
  // Only visit the last recent_record_max_ number of allocation records in entries_ and mark the
  // klass_ fields as strong roots.
  for (auto it = entries_.rbegin(), end = entries_.rend(); it != end; ++it) {
    AllocRecord& record = it->second;
    if (count > 0) {
      buffered_visitor.VisitRootIfNonNull(record.GetClassGcRoot());
      --count;
    }
    // Visit all of the stack frames to make sure no methods in the stack traces get unloaded by
    // class unloading.
    for (size_t i = 0, depth = record.GetDepth(); i < depth; ++i) {
      const AllocRecordStackTraceElement& element = record.StackElement(i);
      DCHECK(element.GetMethod() != nullptr);
      element.GetMethod()->VisitRoots(buffered_visitor, kRuntimePointerSize);
    }
  }
}
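// Move the record's class GcRoot to the class object's post-GC location, if it moved.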
static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor)
    REQUIRES_SHARED(Locks::mutator_lock_)
    REQUIRES(Locks::alloc_tracker_lock_) {
  GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
  // This does not need a read barrier because this is called by GC.
  mirror::Object* old_object = klass.Read<kWithoutReadBarrier>();
  if (old_object != nullptr) {
    // The class object can become null if we implement class unloading.
    // In that case we might still want to keep the class name string (not implemented).
    mirror::Object* new_object = visitor->IsMarked(old_object);
    DCHECK(new_object != nullptr);
    if (UNLIKELY(old_object != new_object)) {
      klass = GcRoot<mirror::Class>(new_object->AsClass());
    }
  }
}
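// Called by the GC after marking. Dead entries older than the most recent
// recent_record_max_ records are erased; dead entries among the recent ones are
// kept with their object root cleared; surviving entries are forwarded to the
// objects' new addresses.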
void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedVisitor* visitor) {
  VLOG(heap) << "Start SweepAllocationRecords()";
  size_t count_deleted = 0, count_moved = 0, count = 0;
  // Only the first (size - recent_record_max_) number of records can be deleted.
  const size_t delete_bound = std::max(entries_.size(), recent_record_max_) - recent_record_max_;
  for (auto it = entries_.begin(), end = entries_.end(); it != end;) {
    ++count;
    // This does not need a read barrier because this is called by GC.
    mirror::Object* old_object = it->first.Read<kWithoutReadBarrier>();
    AllocRecord& record = it->second;
    mirror::Object* new_object = old_object == nullptr ? nullptr : visitor->IsMarked(old_object);
    if (new_object == nullptr) {
      if (count > delete_bound) {
        it->first = GcRoot<mirror::Object>(nullptr);
        SweepClassObject(&record, visitor);
        ++it;
      } else {
        it = entries_.erase(it);
        ++count_deleted;
      }
    } else {
      if (old_object != new_object) {
        it->first = GcRoot<mirror::Object>(new_object);
        ++count_moved;
      }
      SweepClassObject(&record, visitor);
      ++it;
    }
  }
  VLOG(heap) << "Deleted " << count_deleted << " allocation records";
  VLOG(heap) << "Updated " << count_moved << " allocation records";
}
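// The GC disallows new records while it sweeps system weaks (only needed for
// non-read-barrier collectors); RecordAllocation waits on new_record_condition_
// until records are allowed again.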
void AllocRecordObjectMap::AllowNewAllocationRecords() {
  CHECK(!kUseReadBarrier);
  allow_new_record_ = true;
  new_record_condition_.Broadcast(Thread::Current());
}

void AllocRecordObjectMap::DisallowNewAllocationRecords() {
  CHECK(!kUseReadBarrier);
  allow_new_record_ = false;
}

void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
  new_record_condition_.Broadcast(Thread::Current());
}
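// Stack visitor that captures up to max_depth_ (method, dex pc) pairs from the
// allocating thread, including inlined frames and skipping runtime methods.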
class AllocRecordStackVisitor : public StackVisitor {
 public:
  AllocRecordStackVisitor(Thread* thread, size_t max_depth, AllocRecordStackTrace* trace_out)
      REQUIRES_SHARED(Locks::mutator_lock_)
      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        max_depth_(max_depth),
        trace_(trace_out) {}

  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
  // annotalysis.
  bool VisitFrame() OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    if (trace_->GetDepth() >= max_depth_) {
      return false;
    }
    ArtMethod* m = GetMethod();
    // m may be null if we have inlined methods of unresolved classes. b/27858645
    if (m != nullptr && !m->IsRuntimeMethod()) {
      m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
      trace_->AddStackElement(AllocRecordStackTraceElement(m, GetDexPc()));
    }
    return true;
  }

 private:
  const size_t max_depth_;
  AllocRecordStackTrace* const trace_;
};
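// Globally toggle allocation tracking. Enabling creates the record map if needed,
// re-reads the property limits, and instruments the allocation entry points;
// disabling clears existing records and uninstruments outside the lock.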
void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
  Thread* self = Thread::Current();
  Heap* heap = Runtime::Current()->GetHeap();
  if (enable) {
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (heap->IsAllocTrackingEnabled()) {
        return;  // Already enabled, bail.
      }
      AllocRecordObjectMap* records = heap->GetAllocationRecords();
      if (records == nullptr) {
        records = new AllocRecordObjectMap;
        heap->SetAllocationRecords(records);
      }
      CHECK(records != nullptr);
      records->SetProperties();
      std::string self_name;
      self->GetThreadName(self_name);
      if (self_name == "JDWP") {
        records->alloc_ddm_thread_id_ = self->GetTid();
      }
      size_t sz = sizeof(AllocRecordStackTraceElement) * records->max_stack_depth_ +
                  sizeof(AllocRecord) + sizeof(AllocRecordStackTrace);
      LOG(INFO) << "Enabling alloc tracker (" << records->alloc_record_max_ << " entries of "
                << records->max_stack_depth_ << " frames, taking up to "
                << PrettySize(sz * records->alloc_record_max_) << ")";
    }
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      heap->SetAllocTrackingEnabled(true);
    }
  } else {
    // Delete outside of the critical section to avoid possible lock violations like the runtime
    // shutdown lock.
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (!heap->IsAllocTrackingEnabled()) {
        return;  // Already disabled, bail.
      }
      heap->SetAllocTrackingEnabled(false);
      LOG(INFO) << "Disabling alloc tracker";
      AllocRecordObjectMap* records = heap->GetAllocationRecords();
      records->Clear();
    }
    // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
  }
}
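// Record a single allocation. The stack is walked before taking
// alloc_tracker_lock_ because the walk itself may allocate (b/27858645), and the
// insertion below waits out any concurrent GC sweep of the tracker's weak roots.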
void AllocRecordObjectMap::RecordAllocation(Thread* self,
                                            ObjPtr<mirror::Object>* obj,
                                            size_t byte_count) {
  // Get stack trace outside of lock in case there are allocations during the stack walk.
  // b/27858645.
  AllocRecordStackTrace trace;
  AllocRecordStackVisitor visitor(self, max_stack_depth_, /*out*/ &trace);
  {
    StackHandleScope<1> hs(self);
    auto obj_wrapper = hs.NewHandleWrapper(obj);
    visitor.WalkStack();
  }

  MutexLock mu(self, *Locks::alloc_tracker_lock_);
  Heap* const heap = Runtime::Current()->GetHeap();
  if (!heap->IsAllocTrackingEnabled()) {
    // In the process of shutting down recording, bail.
    return;
  }

  // Do not record allocations made by the DDM thread itself.
  if (alloc_ddm_thread_id_ == self->GetTid()) {
    return;
  }

  // Wait for GC's sweeping to complete and allow new records.
  while (UNLIKELY((!kUseReadBarrier && !allow_new_record_) ||
                  (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::alloc_tracker_lock_);
    new_record_condition_.WaitHoldingLocks(self);
  }

  if (!heap->IsAllocTrackingEnabled()) {
    // Return if the allocation tracking has been disabled while waiting for system weak access
    // above.
    return;
  }

  DCHECK_LE(Size(), alloc_record_max_);

  // The trace is complete; tag it with the allocating thread before storing it.
  trace.SetTid(self->GetTid());

  // Add the record.
  Put(obj->Ptr(), AllocRecord(byte_count, (*obj)->GetClass(), std::move(trace)));
  DCHECK_LE(Size(), alloc_record_max_);
}
void AllocRecordObjectMap::Clear() {
  entries_.clear();
}

AllocRecordObjectMap::AllocRecordObjectMap()
    : new_record_condition_("New allocation record condition", *Locks::alloc_tracker_lock_) {}

}  // namespace gc
}  // namespace art