/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "allocation_record.h"

#include "art_method-inl.h"
#include "base/logging.h"  // For VLOG
#include "base/pointer_size.h"
#include "base/stl_util.h"
#include "obj_ptr-inl.h"
#include "object_callbacks.h"
#include "stack.h"
#include "thread-inl.h"  // For GetWeakRefAccessEnabled().

#include <android-base/properties.h>

namespace art HIDDEN {
namespace gc {

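// Resolves the dex pc captured for a stack frame to a source line number, falling back to the
// dex pc itself when no debug info is available.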
int32_t AllocRecordStackTraceElement::ComputeLineNumber() const {
  DCHECK(method_ != nullptr);
  int32_t line_number = method_->GetLineNumFromDexPC(dex_pc_);
  if (line_number == -1 && !method_->IsProxyMethod()) {
    // If we failed to map the dex pc to a line number, then most probably there is no debug info.
    // Make the line_number same as the dex pc - it can be decoded later using a map file.
    // See b/30183883 and b/228000954.
    line_number = static_cast<int32_t>(dex_pc_);
  }
  return line_number;
}

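// Returns the descriptor string of the allocated object's class, or "null" if the class GC root
// has been cleared.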
const char* AllocRecord::GetClassDescriptor(std::string* storage) const {
  // klass_ could contain null only if we implement class unloading.
  return klass_.IsNull() ? "null" : klass_.Read()->GetDescriptor(storage);
}

void AllocRecordObjectMap::SetMaxStackDepth(size_t max_stack_depth) {
  // Log fatal since this should already be checked when calling VMDebug.setAllocTrackerStackDepth.
  CHECK_LE(max_stack_depth, kMaxSupportedStackDepth)
      << "Allocation record max stack depth is too large";
  max_stack_depth_ = max_stack_depth;
}

AllocRecordObjectMap::~AllocRecordObjectMap() {
  Clear();
}

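// Visits the class GC roots of the most recent recent_record_max_ records as strong roots, and
// the ArtMethods referenced by every recorded stack trace, so that neither those classes nor the
// methods are unloaded while allocation tracking is active.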
void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) {
  // When we are compacting in userfaultfd GC, the class GC-roots are already
  // updated in SweepAllocationRecords()->SweepClassObject().
  if (Runtime::Current()->GetHeap()->IsPerformingUffdCompaction()) {
    return;
  }
  CHECK_LE(recent_record_max_, alloc_record_max_);
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(visitor, RootInfo(kRootDebugger));
  size_t count = recent_record_max_;
  // Only visit the last recent_record_max_ number of allocation records in entries_ and mark the
  // klass_ fields as strong roots.
  for (auto it = entries_.rbegin(), end = entries_.rend(); it != end; ++it) {
    AllocRecord& record = it->second;
    if (count > 0) {
      buffered_visitor.VisitRootIfNonNull(record.GetClassGcRoot());
      --count;
    }
    // Visit all of the stack frames to make sure no methods in the stack traces get unloaded by
    // class unloading.
    for (size_t i = 0, depth = record.GetDepth(); i < depth; ++i) {
      const AllocRecordStackTraceElement& element = record.StackElement(i);
      DCHECK(element.GetMethod() != nullptr);
      element.GetMethod()->VisitRoots(buffered_visitor, kRuntimePointerSize);
    }
  }
}

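// Updates the class GC root of a record if the class object has been moved by the GC. Called only
// during GC sweeping with the alloc tracker lock held, so no read barrier is needed.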
static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor)
    REQUIRES_SHARED(Locks::mutator_lock_)
    REQUIRES(Locks::alloc_tracker_lock_) {
  GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
  // This does not need a read barrier because this is called by GC.
  mirror::Object* old_object = klass.Read<kWithoutReadBarrier>();
  if (old_object != nullptr) {
    // The class object can become null if we implement class unloading.
    // In that case we might still want to keep the class name string (not implemented).
    mirror::Object* new_object = visitor->IsMarked(old_object);
    DCHECK(new_object != nullptr);
    if (UNLIKELY(old_object != new_object)) {
      // We can't use AsClass() as it uses IsClass in a DCHECK, which expects
      // the class' contents to be there. This is not the case in userfaultfd
      // GC.
      klass = GcRoot<mirror::Class>(ObjPtr<mirror::Class>::DownCast(new_object));
    }
  }
}

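// Called by the GC to update or clear the object and class roots of each record after marking.
// Records whose object has died are erased, except for the most recent recent_record_max_
// entries, which are kept (with a null object root) so recent allocations remain visible.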
void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedVisitor* visitor) {
  VLOG(heap) << "Start SweepAllocationRecords()";
  size_t count_deleted = 0, count_moved = 0, count = 0;
  // Only the first (size - recent_record_max_) number of records can be deleted.
  const size_t delete_bound = std::max(entries_.size(), recent_record_max_) - recent_record_max_;
  for (auto it = entries_.begin(), end = entries_.end(); it != end;) {
    ++count;
    // This does not need a read barrier because this is called by GC.
    mirror::Object* old_object = it->first.Read<kWithoutReadBarrier>();
    AllocRecord& record = it->second;
    mirror::Object* new_object = old_object == nullptr ? nullptr : visitor->IsMarked(old_object);
    if (new_object == nullptr) {
      if (count > delete_bound) {
        it->first = GcRoot<mirror::Object>(nullptr);
        SweepClassObject(&record, visitor);
        ++it;
      } else {
        it = entries_.erase(it);
        ++count_deleted;
      }
    } else {
      if (old_object != new_object) {
        it->first = GcRoot<mirror::Object>(new_object);
        ++count_moved;
      }
      SweepClassObject(&record, visitor);
      ++it;
    }
  }
  VLOG(heap) << "Deleted " << count_deleted << " allocation records";
  VLOG(heap) << "Updated " << count_moved << " allocation records";
}

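// Allow/DisallowNewAllocationRecords gate new records while the GC sweeps system weaks; they are
// only used without the read barrier (non-CC) configuration, where weak ref access is not gated
// per-thread, hence the CHECK(!gUseReadBarrier).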
void AllocRecordObjectMap::AllowNewAllocationRecords() {
  CHECK(!gUseReadBarrier);
  allow_new_record_ = true;
  new_record_condition_.Broadcast(Thread::Current());
}

void AllocRecordObjectMap::DisallowNewAllocationRecords() {
  CHECK(!gUseReadBarrier);
  allow_new_record_ = false;
}

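// Wakes up threads blocked in RecordAllocation(), e.g. after weak ref access is re-enabled under
// the read-barrier configuration (there is no allow_new_record_ flag to flip in that case).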
void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
  new_record_condition_.Broadcast(Thread::Current());
}

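// Enables or disables allocation tracking for the whole runtime. Enabling lazily creates the
// AllocRecordObjectMap on the heap and instruments the quick allocation entry points so that
// RecordAllocation() is invoked for each allocation; disabling uninstruments them and clears all
// recorded entries.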
void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
  Thread* self = Thread::Current();
  Heap* heap = Runtime::Current()->GetHeap();
  if (enable) {
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (heap->IsAllocTrackingEnabled()) {
        return;  // Already enabled, bail.
      }
      AllocRecordObjectMap* records = heap->GetAllocationRecords();
      if (records == nullptr) {
        records = new AllocRecordObjectMap;
        heap->SetAllocationRecords(records);
      }
      CHECK(records != nullptr);
      records->SetMaxStackDepth(heap->GetAllocTrackerStackDepth());
      size_t sz = sizeof(AllocRecordStackTraceElement) * records->max_stack_depth_ +
                  sizeof(AllocRecord) + sizeof(AllocRecordStackTrace);
      LOG(INFO) << "Enabling alloc tracker (" << records->alloc_record_max_ << " entries of "
                << records->max_stack_depth_ << " frames, taking up to "
                << PrettySize(sz * records->alloc_record_max_) << ")";
    }
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      heap->SetAllocTrackingEnabled(true);
    }
  } else {
    // Delete outside of the critical section to avoid possible lock violations like the runtime
    // shutdown lock.
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (!heap->IsAllocTrackingEnabled()) {
        return;  // Already disabled, bail.
      }
      heap->SetAllocTrackingEnabled(false);
      LOG(INFO) << "Disabling alloc tracker";
      AllocRecordObjectMap* records = heap->GetAllocationRecords();
      records->Clear();
    }
    // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
  }
}

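// Records a single allocation: captures the (depth-bounded) stack trace outside the tracker lock,
// then waits until new records are allowed (GC sweeping of system weaks has finished) before
// inserting the record keyed by the allocated object.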
void AllocRecordObjectMap::RecordAllocation(Thread* self,
                                            ObjPtr<mirror::Object>* obj,
                                            size_t byte_count) {
  // Get stack trace outside of lock in case there are allocations during the stack walk.
  // b/27858645.
  AllocRecordStackTrace trace;
  {
    StackHandleScope<1> hs(self);
    auto obj_wrapper = hs.NewHandleWrapper(obj);

    StackVisitor::WalkStack(
        [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
          if (trace.GetDepth() >= max_stack_depth_) {
            return false;
          }
          ArtMethod* m = stack_visitor->GetMethod();
          // m may be null if we have inlined methods of unresolved classes. b/27858645
          if (m != nullptr && !m->IsRuntimeMethod()) {
            m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
            trace.AddStackElement(AllocRecordStackTraceElement(m, stack_visitor->GetDexPc()));
          }
          return true;
        },
        self,
        /* context= */ nullptr,
        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
  }

  MutexLock mu(self, *Locks::alloc_tracker_lock_);
  Heap* const heap = Runtime::Current()->GetHeap();
  if (!heap->IsAllocTrackingEnabled()) {
    // In the process of shutting down recording, bail.
    return;
  }

  // TODO: Skip recording allocations associated with DDMS. This was a feature of the old debugger
  // but when we switched to the JVMTI based debugger the feature was (unintentionally) broken.
  // Since nobody seemed to really notice or care it might not be worth the trouble.

  // Wait for GC's sweeping to complete and allow new records.
  while (UNLIKELY((!gUseReadBarrier && !allow_new_record_) ||
                  (gUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::alloc_tracker_lock_);
    new_record_condition_.WaitHoldingLocks(self);
  }

  if (!heap->IsAllocTrackingEnabled()) {
    // Return if allocation tracking has been disabled while waiting for system weak access above.
    return;
  }

  DCHECK_LE(Size(), alloc_record_max_);

  // Erase extra unfilled elements.
  trace.SetTid(self->GetTid());

  // Add the record.
  Put(obj->Ptr(), AllocRecord(byte_count, (*obj)->GetClass(), std::move(trace)));
  DCHECK_LE(Size(), alloc_record_max_);
}

void AllocRecordObjectMap::Clear() {
  entries_.clear();
}

AllocRecordObjectMap::AllocRecordObjectMap()
    : new_record_condition_("New allocation record condition", *Locks::alloc_tracker_lock_) {}

}  // namespace gc
}  // namespace art