/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger.h"

#include <sys/uio.h>

#include <functional>
#include <memory>
#include <set>
#include <unordered_set>
#include <vector>

#include "android-base/macros.h"
#include "android-base/stringprintf.h"

#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/endian_utils.h"
#include "base/logging.h"
#include "base/memory_tool.h"
#include "base/pointer_size.h"
#include "base/safe_map.h"
#include "base/strlcpy.h"
#include "base/time_utils.h"
#include "class_linker-inl.h"
#include "class_linker.h"
#include "dex/descriptors_names.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_annotations.h"
#include "dex/dex_file_types.h"
#include "dex/dex_instruction.h"
#include "dex/utf.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/allocation_record.h"
#include "gc/gc_cause.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/bump_pointer_space-walk-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "handle_scope-inl.h"
#include "instrumentation.h"
#include "jni/jni_internal.h"
#include "jvalue-inl.h"
#include "mirror/array-alloc-inl.h"
#include "mirror/class-alloc-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-alloc-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
#include "nativehelper/scoped_local_ref.h"
#include "nativehelper/scoped_primitive_array.h"
#include "oat/oat_file.h"
#include "obj_ptr-inl.h"
#include "reflection.h"
#include "reflective_handle.h"
#include "reflective_handle_scope-inl.h"
#include "runtime-inl.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "scoped_thread_state_change.h"
#include "stack.h"
#include "thread.h"
#include "thread_list.h"
#include "thread_pool.h"
#include "well_known_classes.h"

namespace art HIDDEN {

using android::base::StringPrintf;

// Limit alloc_record_count to the 2BE value (64k-1) that is the limit of the current protocol.
static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
  const size_t cap = 0xffff;
  if (alloc_record_count > cap) {
    return cap;
  }
  return alloc_record_count;
}

// JDWP is allowed unless the Zygote forbids it.
static bool gJdwpAllowed = true;

static bool gDdmThreadNotification = false;

// DDMS GC-related settings.
static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmHpsgWhat;
static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmNhsgWhat;

Dbg::DbgThreadLifecycleCallback Dbg::thread_lifecycle_callback_;

void Dbg::GcDidFinish() {
  if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Sending heap info to DDM";
    DdmSendHeapInfo(gDdmHpifWhen);
  }
  if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping heap to DDM";
    DdmSendHeapSegments(false);
  }
  if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping native heap to DDM";
    DdmSendHeapSegments(true);
  }
}

void Dbg::SetJdwpAllowed(bool allowed) {
  gJdwpAllowed = allowed;
}

bool Dbg::IsJdwpAllowed() {
  return gJdwpAllowed;
}

// Do we need to deoptimize the stack to handle an exception?
bool Dbg::IsForcedInterpreterNeededForExceptionImpl(Thread* thread) {
  // Deoptimization is required if at least one method on the stack needs it. However, we
  // skip frames that will be unwound (thus not executed).
  bool needs_deoptimization = false;
  StackVisitor::WalkStack(
      [&](art::StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
        // The visitor is meant to be used only when handling an exception from compiled code.
        CHECK(!visitor->IsShadowFrame()) << "We only expect to visit compiled frame: "
                                         << ArtMethod::PrettyMethod(visitor->GetMethod());
        ArtMethod* method = visitor->GetMethod();
        if (method == nullptr) {
          // We reached an upcall and don't need to deoptimize this part of the stack
          // (ManagedFragment), so we can stop the visit.
          DCHECK(!needs_deoptimization);
          return false;
        }
        if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
          // We found a compiled frame in the stack but instrumentation is set to interpret
          // everything: we need to deoptimize.
          needs_deoptimization = true;
          return false;
        }
        if (Runtime::Current()->GetInstrumentation()->IsDeoptimized(method)) {
          // We found a deoptimized method in the stack.
          needs_deoptimization = true;
          return false;
        }
        ShadowFrame* frame = visitor->GetThread()->FindDebuggerShadowFrame(visitor->GetFrameId());
        if (frame != nullptr) {
          // The debugger allocated a ShadowFrame to update a variable in the stack: we need to
          // deoptimize the stack to execute (and deallocate) this frame.
          needs_deoptimization = true;
          return false;
        }
        return true;
      },
      thread,
      /* context= */ nullptr,
      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames,
      /* check_suspended */ true,
      /* include_transitions */ true);
  return needs_deoptimization;
}


bool Dbg::DdmHandleChunk(JNIEnv* env,
                         uint32_t type,
                         const ArrayRef<const jbyte>& data,
                         /*out*/uint32_t* out_type,
                         /*out*/std::vector<uint8_t>* out_data) {
  ScopedObjectAccess soa(env);
  StackHandleScope<1u> hs(soa.Self());
  Handle<mirror::ByteArray> data_array =
      hs.NewHandle(mirror::ByteArray::Alloc(soa.Self(), data.size()));
  if (data_array == nullptr) {
    LOG(WARNING) << "byte[] allocation failed: " << data.size();
    env->ExceptionClear();
    return false;
  }
  memcpy(data_array->GetData(), data.data(), data.size());
  // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)".
  ArtMethod* dispatch = WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch;
  ObjPtr<mirror::Object> chunk = dispatch->InvokeStatic<'L', 'I', 'L', 'I', 'I'>(
      soa.Self(), type, data_array.Get(), 0, static_cast<jint>(data.size()));
  if (soa.Self()->IsExceptionPending()) {
    LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type) << std::endl
              << soa.Self()->GetException()->Dump();
    soa.Self()->ClearException();
    return false;
  }

  if (chunk == nullptr) {
    return false;
  }

  /*
   * Pull the pieces out of the chunk. We copy the results into a
   * newly-allocated buffer that the caller can free. We don't want to
   * continue using the Chunk object because nothing has a reference to it.
   *
   * We could avoid this by returning type/data/offset/length and having
   * the caller be aware of the object lifetime issues, but that
   * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work
   * if we have responses for multiple chunks.
   *
   * So we're pretty much stuck with copying data around multiple times.
   */
  ObjPtr<mirror::ByteArray> reply_data = ObjPtr<mirror::ByteArray>::DownCast(
      WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data->GetObject(chunk));
  jint offset = WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset->GetInt(chunk);
  jint length = WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length->GetInt(chunk);
  *out_type = WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type->GetInt(chunk);

  VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d",
                             type,
                             reply_data.Ptr(),
                             offset,
                             length);

  if (reply_data == nullptr) {
    LOG(INFO) << "Null reply data";
    return false;
  }

  jint reply_length = reply_data->GetLength();
  if (offset < 0 || offset > reply_length || length < 0 || length > reply_length - offset) {
    LOG(INFO) << "Invalid reply data range: offset=" << offset << ", length=" << length
              << " reply_length=" << reply_length;
    return false;
  }

  out_data->resize(length);
  memcpy(out_data->data(), reply_data->GetData() + offset, length);

  return true;
}

void Dbg::DdmBroadcast(bool connect) {
  VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";

  Thread* self = Thread::Current();
  if (self->GetState() != ThreadState::kRunnable) {
    LOG(ERROR) << "DDM broadcast in thread state " << self->GetState();
    /* try anyway? */
  }

  // TODO: Can we really get here while not `Runnable`? If not, we do not need the `soa`.
  ScopedObjectAccessUnchecked soa(self);
  JNIEnv* env = self->GetJniEnv();
  jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/;
  ArtMethod* broadcast = WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast;
  broadcast->InvokeStatic<'V', 'I'>(self, event);
  if (self->IsExceptionPending()) {
    LOG(ERROR) << "DdmServer.broadcast " << event << " failed";
    env->ExceptionDescribe();
    env->ExceptionClear();
  }
}

void Dbg::DdmConnected() {
  Dbg::DdmBroadcast(true);
}

void Dbg::DdmDisconnected() {
  Dbg::DdmBroadcast(false);
  gDdmThreadNotification = false;
}


/*
 * Send a notification when a thread starts, stops, or changes its name.
 *
 * Because we broadcast the full set of threads when the notifications are
 * first enabled, it's possible for "thread" to be actively executing.
 */
void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
  Locks::mutator_lock_->AssertNotExclusiveHeld(Thread::Current());
  if (!gDdmThreadNotification) {
    return;
  }

  RuntimeCallbacks* cb = Runtime::Current()->GetRuntimeCallbacks();
  if (type == CHUNK_TYPE("THDE")) {
    uint8_t buf[4];
    Set4BE(&buf[0], t->GetThreadId());
    cb->DdmPublishChunk(CHUNK_TYPE("THDE"), ArrayRef<const uint8_t>(buf));
  } else {
    CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
    StackHandleScope<1> hs(Thread::Current());
    Handle<mirror::String> name(hs.NewHandle(t->GetThreadName()));
    size_t char_count = (name != nullptr) ? name->GetLength() : 0;
    const jchar* chars = (name != nullptr) ? name->GetValue() : nullptr;
    bool is_compressed = (name != nullptr) ? name->IsCompressed() : false;
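
    // A note on the payload built below (inferred from the appends and the CHECK_EQ on the
    // final size): [u4] thread id, [u4] name length in UTF-16 code units, then the name
    // encoded as big-endian UTF-16.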
    std::vector<uint8_t> bytes;
    Append4BE(bytes, t->GetThreadId());
    if (is_compressed) {
      const uint8_t* chars_compressed = name->GetValueCompressed();
      AppendUtf16CompressedBE(bytes, chars_compressed, char_count);
    } else {
      AppendUtf16BE(bytes, chars, char_count);
    }
    CHECK_EQ(bytes.size(), char_count * 2 + sizeof(uint32_t) * 2);
    cb->DdmPublishChunk(type, ArrayRef<const uint8_t>(bytes));
  }
}

void Dbg::DdmSetThreadNotification(bool enable) {
  // Enable/disable thread notifications.
  gDdmThreadNotification = enable;
  if (enable) {
    // Use a Checkpoint to cause every currently running thread to send its own notification when
    // able. We then wait for every thread active at the time to post the creation notification.
    // Threads created later will send this themselves.
    Thread* self = Thread::Current();
    ScopedObjectAccess soa(self);
    Barrier finish_barrier(0);
    FunctionClosure fc([&](Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) {
      Thread* cls_self = Thread::Current();
      Locks::mutator_lock_->AssertSharedHeld(cls_self);
      Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
      finish_barrier.Pass(cls_self);
    });
    // TODO(b/253671779): The above eventually results in calls to EventHandler::DispatchEvent,
    // which does a ScopedThreadStateChange, which amounts to a thread state change inside the
    // checkpoint run method. Hence the normal check would fail, and thus we specify Unchecked
    // here.
    size_t checkpoints = Runtime::Current()->GetThreadList()->RunCheckpointUnchecked(&fc);
    ScopedThreadSuspension sts(self, ThreadState::kWaitingForCheckPointsToRun);
    finish_barrier.Increment(self, checkpoints);
  }
}

void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
  Dbg::DdmSendThreadNotification(t, type);
}

void Dbg::PostThreadStart(Thread* t) {
  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
}

void Dbg::PostThreadDeath(Thread* t) {
  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
}

int Dbg::DdmHandleHpifChunk(HpifWhen when) {
  if (when == HPIF_WHEN_NOW) {
    DdmSendHeapInfo(when);
    return 1;
  }

  if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
    LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
    return 0;
  }

  gDdmHpifWhen = when;
  return 1;
}

bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
  if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) {
    LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when);
    return false;
  }

  if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) {
    LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what);
    return false;
  }

  if (native) {
    gDdmNhsgWhen = when;
    gDdmNhsgWhat = what;
  } else {
    gDdmHpsgWhen = when;
    gDdmHpsgWhat = what;
  }
  return true;
}

void Dbg::DdmSendHeapInfo(HpifWhen reason) {
  // If there's a one-shot 'when', reset it.
  if (reason == gDdmHpifWhen) {
    if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
      gDdmHpifWhen = HPIF_WHEN_NEVER;
    }
  }

  /*
   * Chunk HPIF (client --> server)
   *
   * Heap Info. General information about the heap,
   * suitable for a summary display.
   *
   *   [u4]: number of heaps
   *
   *   For each heap:
   *     [u4]: heap ID
   *     [u8]: timestamp in ms since Unix epoch
   *     [u1]: capture reason (same as 'when' value from server)
   *     [u4]: max heap size in bytes (-Xmx)
   *     [u4]: current heap size in bytes
   *     [u4]: current number of bytes allocated
   *     [u4]: current number of objects allocated
   */
  uint8_t heap_count = 1;
  gc::Heap* heap = Runtime::Current()->GetHeap();
  std::vector<uint8_t> bytes;
  Append4BE(bytes, heap_count);
  Append4BE(bytes, 1);  // Heap id (bogus; we only have one heap).
  Append8BE(bytes, MilliTime());
  Append1BE(bytes, reason);
  Append4BE(bytes, heap->GetMaxMemory());  // Max allowed heap size in bytes.
  Append4BE(bytes, heap->GetTotalMemory());  // Current heap size in bytes.
  Append4BE(bytes, heap->GetBytesAllocated());
  Append4BE(bytes, heap->GetObjectsAllocated());
  CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
  Runtime::Current()->GetRuntimeCallbacks()->DdmPublishChunk(CHUNK_TYPE("HPIF"),
                                                             ArrayRef<const uint8_t>(bytes));
}

enum HpsgSolidity {
  SOLIDITY_FREE = 0,
  SOLIDITY_HARD = 1,
  SOLIDITY_SOFT = 2,
  SOLIDITY_WEAK = 3,
  SOLIDITY_PHANTOM = 4,
  SOLIDITY_FINALIZABLE = 5,
  SOLIDITY_SWEEP = 6,
};

enum HpsgKind {
  KIND_OBJECT = 0,
  KIND_CLASS_OBJECT = 1,
  KIND_ARRAY_1 = 2,
  KIND_ARRAY_2 = 3,
  KIND_ARRAY_4 = 4,
  KIND_ARRAY_8 = 5,
  KIND_UNKNOWN = 6,
  KIND_NATIVE = 7,
};

#define HPSG_PARTIAL (1<<7)
#define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
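// For example, given the encoding above, a hard-referenced array with 4-byte components is
// reported as HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4) == ((4 & 0x7) << 3) | (1 & 0x7) == 0x21.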

class HeapChunkContext {
 public:
  // Maximum chunk size. Obtain this from the formula:
  // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
  HeapChunkContext(bool merge, bool native)
      : buf_(16384 - 16),
        type_(0),
        chunk_overhead_(0) {
    Reset();
    if (native) {
      type_ = CHUNK_TYPE("NHSG");
    } else {
      type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
    }
  }

  ~HeapChunkContext() {
    if (p_ > &buf_[0]) {
      Flush();
    }
  }

  void SetChunkOverhead(size_t chunk_overhead) {
    chunk_overhead_ = chunk_overhead;
  }

  void ResetStartOfNextChunk() {
    startOfNextMemoryChunk_ = nullptr;
  }

  void EnsureHeader(const void* chunk_ptr) {
    if (!needHeader_) {
      return;
    }

    // Start a new HPSx chunk.
    Write4BE(&p_, 1);  // Heap id (bogus; we only have one heap).
    Write1BE(&p_, 8);  // Size of allocation unit, in bytes.

    Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr));  // virtual address of segment start.
    Write4BE(&p_, 0);  // offset of this piece (relative to the virtual address).
    // [u4]: length of piece, in allocation units
    // We won't know this until we're done, so save the offset and stuff in a fake value.
    pieceLenField_ = p_;
    Write4BE(&p_, 0x55555555);
    needHeader_ = false;
  }

  void Flush() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (pieceLenField_ == nullptr) {
      // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
      CHECK(needHeader_);
      return;
    }
    // Patch the "length of piece" field.
    CHECK_LE(&buf_[0], pieceLenField_);
    CHECK_LE(pieceLenField_, p_);
    Set4BE(pieceLenField_, totalAllocationUnits_);

    ArrayRef<const uint8_t> out(&buf_[0], p_ - &buf_[0]);
    Runtime::Current()->GetRuntimeCallbacks()->DdmPublishChunk(type_, out);
    Reset();
  }

  static void HeapChunkJavaCallback(void* start, void* end, size_t used_bytes, void* arg)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_,
                      Locks::mutator_lock_) {
    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkJavaCallback(start, end, used_bytes);
  }

  static void HeapChunkNativeCallback(void* start, void* end, size_t used_bytes, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkNativeCallback(start, end, used_bytes);
  }

 private:
  enum { ALLOCATION_UNIT_SIZE = 8 };
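  // Note: chunk lengths are reported to DDMS in these 8-byte allocation units rather than in
  // bytes (see AppendChunk below, which divides byte lengths by ALLOCATION_UNIT_SIZE).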

  void Reset() {
    p_ = &buf_[0];
    ResetStartOfNextChunk();
    totalAllocationUnits_ = 0;
    needHeader_ = true;
    pieceLenField_ = nullptr;
  }

  bool IsNative() const {
    return type_ == CHUNK_TYPE("NHSG");
  }

  // Returns true if the object is not an empty chunk.
  bool ProcessRecord(void* start, size_t used_bytes) REQUIRES_SHARED(Locks::mutator_lock_) {
    // Note: heap callbacks cannot manipulate the heap upon which they are crawling; care is taken
    // in the following code not to allocate memory, by ensuring buf_ is of the correct size.
    if (used_bytes == 0) {
      if (start == nullptr) {
        // Reset for start of new heap.
        startOfNextMemoryChunk_ = nullptr;
        Flush();
      }
      // Only process in-use memory so that free region information
      // also includes dlmalloc bookkeeping.
      return false;
    }
    if (startOfNextMemoryChunk_ != nullptr) {
      // Transmit any pending free memory. Native free memory of over kMaxFreeLen could be because
      // of the use of mmaps, so don't report it. If there is no free memory to report, start a
      // new segment.
      bool flush = true;
      if (start > startOfNextMemoryChunk_) {
        const size_t kMaxFreeLen = 2 * gPageSize;
        void* free_start = startOfNextMemoryChunk_;
        void* free_end = start;
        const size_t free_len =
            reinterpret_cast<uintptr_t>(free_end) - reinterpret_cast<uintptr_t>(free_start);
        if (!IsNative() || free_len < kMaxFreeLen) {
          AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), free_start, free_len, IsNative());
          flush = false;
        }
      }
      if (flush) {
        startOfNextMemoryChunk_ = nullptr;
        Flush();
      }
    }
    return true;
  }

  void HeapChunkNativeCallback(void* start, void* /*end*/, size_t used_bytes)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (ProcessRecord(start, used_bytes)) {
      uint8_t state = ExamineNativeObject(start);
      AppendChunk(state, start, used_bytes + chunk_overhead_, /*is_native=*/ true);
      startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
    }
  }

  void HeapChunkJavaCallback(void* start, void* /*end*/, size_t used_bytes)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    if (ProcessRecord(start, used_bytes)) {
      // Determine the type of this chunk.
      // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
      // If it's the same, we should combine them.
      uint8_t state = ExamineJavaObject(reinterpret_cast<mirror::Object*>(start));
      AppendChunk(state, start, used_bytes + chunk_overhead_, /*is_native=*/ false);
      startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
    }
  }

  void AppendChunk(uint8_t state, void* ptr, size_t length, bool is_native)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // Make sure there's enough room left in the buffer.
    // We need two bytes for every (possibly partial) group of 256 allocation units used by the
    // chunk, plus 17 bytes for any header.
    const size_t needed = ((RoundUp(length / ALLOCATION_UNIT_SIZE, 256) / 256) * 2) + 17;
    size_t byte_left = &buf_.back() - p_;
    if (byte_left < needed) {
      if (is_native) {
        // Cannot trigger memory allocation while walking native heap.
        return;
      }
      Flush();
    }

    byte_left = &buf_.back() - p_;
    if (byte_left < needed) {
      LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", "
                   << needed << " bytes)";
      return;
    }
    EnsureHeader(ptr);
    // Write out the chunk description.
    length /= ALLOCATION_UNIT_SIZE;  // Convert to allocation units.
    totalAllocationUnits_ += length;
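    // Run-length encode the chunk as (state, unit_count - 1) byte pairs, splitting runs longer
    // than 256 allocation units into HPSG_PARTIAL records. For example, a 2000-unit chunk is
    // emitted as seven (state | HPSG_PARTIAL, 255) pairs followed by a final (state, 207) pair.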
    while (length > 256) {
      *p_++ = state | HPSG_PARTIAL;
      *p_++ = 255;  // length - 1
      length -= 256;
    }
    *p_++ = state;
    *p_++ = length - 1;
  }

  uint8_t ExamineNativeObject(const void* p) REQUIRES_SHARED(Locks::mutator_lock_) {
    return p == nullptr ? HPSG_STATE(SOLIDITY_FREE, 0) : HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
  }

  uint8_t ExamineJavaObject(ObjPtr<mirror::Object> o)
      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    if (o == nullptr) {
      return HPSG_STATE(SOLIDITY_FREE, 0);
    }
    // It's an allocated chunk. Figure out what it is.
    gc::Heap* heap = Runtime::Current()->GetHeap();
    if (!heap->IsLiveObjectLocked(o)) {
      LOG(ERROR) << "Invalid object in managed heap: " << o;
      return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
    }
    ObjPtr<mirror::Class> c = o->GetClass();
    if (c == nullptr) {
      // The object was probably just created but hasn't been initialized yet.
      return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
    }
    if (!heap->IsValidObjectAddress(c.Ptr())) {
      LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
    }
    if (c->GetClass() == nullptr) {
      LOG(ERROR) << "Null class of class " << c << " for object " << o;
      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
    }
    if (c->IsClassClass()) {
      return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
    }
    if (c->IsArrayClass()) {
      switch (c->GetComponentSize()) {
        case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
        case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
        case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
        case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
      }
    }
    return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
  }

  std::vector<uint8_t> buf_;
  uint8_t* p_;
  uint8_t* pieceLenField_;
  void* startOfNextMemoryChunk_;
  size_t totalAllocationUnits_;
  uint32_t type_;
  bool needHeader_;
  size_t chunk_overhead_;

  DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
};


void Dbg::DdmSendHeapSegments(bool native) {
  Dbg::HpsgWhen when = native ? gDdmNhsgWhen : gDdmHpsgWhen;
  Dbg::HpsgWhat what = native ? gDdmNhsgWhat : gDdmHpsgWhat;
  if (when == HPSG_WHEN_NEVER) {
    return;
  }
  RuntimeCallbacks* cb = Runtime::Current()->GetRuntimeCallbacks();
  // Figure out what kind of chunks we'll be sending.
  CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS)
      << static_cast<int>(what);

  // First, send a heap start chunk.
  uint8_t heap_id[4];
  Set4BE(&heap_id[0], 1);  // Heap id (bogus; we only have one heap).
  cb->DdmPublishChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"),
                      ArrayRef<const uint8_t>(heap_id));
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertSharedHeld(self);

  // Send a series of heap segment chunks.
  HeapChunkContext context(what == HPSG_WHAT_MERGED_OBJECTS, native);
  auto bump_pointer_space_visitor = [&](mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
    const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
    HeapChunkContext::HeapChunkJavaCallback(
        obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, &context);
  };
  if (native) {
    UNIMPLEMENTED(WARNING) << "Native heap inspection is not supported";
  } else {
    gc::Heap* heap = Runtime::Current()->GetHeap();
    for (const auto& space : heap->GetContinuousSpaces()) {
      if (space->IsDlMallocSpace()) {
        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
        // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for
        // an allocation then the first sizeof(size_t) may belong to it.
        context.SetChunkOverhead(sizeof(size_t));
        space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
      } else if (space->IsRosAllocSpace()) {
        context.SetChunkOverhead(0);
        // Need to acquire the mutator lock before the heap bitmap lock with exclusive access
        // since RosAlloc's internal logic doesn't know to release and reacquire the heap bitmap
        // lock.
        ScopedThreadSuspension sts(self, ThreadState::kSuspended);
        ScopedSuspendAll ssa(__FUNCTION__);
        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
        space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
      } else if (space->IsBumpPointerSpace()) {
        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
        context.SetChunkOverhead(0);
        space->AsBumpPointerSpace()->Walk(bump_pointer_space_visitor);
        HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
      } else if (space->IsRegionSpace()) {
        heap->IncrementDisableMovingGC(self);
        {
          ScopedThreadSuspension sts(self, ThreadState::kSuspended);
          ScopedSuspendAll ssa(__FUNCTION__);
          ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
          context.SetChunkOverhead(0);
          space->AsRegionSpace()->Walk(bump_pointer_space_visitor);
          HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
        }
        heap->DecrementDisableMovingGC(self);
      } else {
        UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
      }
      context.ResetStartOfNextChunk();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Walk the large objects; these are not in the AllocSpace.
    context.SetChunkOverhead(0);
    heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
  }

  // Finally, send a heap end chunk.
  cb->DdmPublishChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"),
                      ArrayRef<const uint8_t>(heap_id));
}

void Dbg::SetAllocTrackingEnabled(bool enable) {
  gc::AllocRecordObjectMap::SetAllocTrackingEnabled(enable);
}

class StringTable {
 private:
  struct Entry {
    explicit Entry(const char* data_in)
        : data(data_in), hash(ComputeModifiedUtf8Hash(data_in)), index(0) {
    }
    Entry(const Entry& entry) = default;
    Entry(Entry&& entry) = default;

    // Pointer to the actual string data.
    const char* data;

    // The hash of the data.
    const uint32_t hash;

    // The index. This will be filled in on Finish and is not part of the ordering, so mark it
    // mutable.
    mutable uint32_t index;

    bool operator==(const Entry& other) const {
      return strcmp(data, other.data) == 0;
    }
  };
  struct EntryHash {
    size_t operator()(const Entry& entry) const {
      return entry.hash;
    }
  };

 public:
  StringTable() : finished_(false) {
  }

  void Add(const char* str, bool copy_string) {
    DCHECK(!finished_);
    if (UNLIKELY(copy_string)) {
      // Check whether it's already there.
      Entry entry(str);
      if (table_.find(entry) != table_.end()) {
        return;
      }

      // Make a copy.
      size_t str_len = strlen(str);
      char* copy = new char[str_len + 1];
      strlcpy(copy, str, str_len + 1);
      string_backup_.emplace_back(copy);
      str = copy;
    }
    Entry entry(str);
    table_.insert(entry);
  }

  // Update all entries and give them an index. Note that this is likely not the insertion order,
  // as the set will with high likelihood reorder elements. Thus, Add must not be called after
  // Finish, and Finish must be called before IndexOf. In that case, WriteTo will walk in
  // the same order as Finish, and indices will agree. The order invariant, as well as indices,
  // are enforced through debug checks.
  void Finish() {
    DCHECK(!finished_);
    finished_ = true;
    uint32_t index = 0;
    for (auto& entry : table_) {
      entry.index = index;
      ++index;
    }
  }

  size_t IndexOf(const char* s) const {
    DCHECK(finished_);
    Entry entry(s);
    auto it = table_.find(entry);
    if (it == table_.end()) {
      LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
    }
    return it->index;
  }

  size_t Size() const {
    return table_.size();
  }

  void WriteTo(std::vector<uint8_t>& bytes) const {
    DCHECK(finished_);
    uint32_t cur_index = 0;
    for (const auto& entry : table_) {
      DCHECK_EQ(cur_index++, entry.index);

      size_t s_len = CountModifiedUtf8Chars(entry.data);
      std::unique_ptr<uint16_t[]> s_utf16(new uint16_t[s_len]);
      ConvertModifiedUtf8ToUtf16(s_utf16.get(), entry.data);
      AppendUtf16BE(bytes, s_utf16.get(), s_len);
    }
  }

 private:
  std::unordered_set<Entry, EntryHash> table_;
  std::vector<std::unique_ptr<char[]>> string_backup_;

  bool finished_;

  DISALLOW_COPY_AND_ASSIGN(StringTable);
};


static const char* GetMethodSourceFile(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(method != nullptr);
  const char* source_file = method->GetDeclaringClassSourceFile();
  return (source_file != nullptr) ? source_file : "";
}

/*
 * The data we send to DDMS contains everything we have recorded.
 *
 * Message header (all values big-endian):
 *   (1b) message header len (to allow future expansion); includes itself
 *   (1b) entry header len
 *   (1b) stack frame len
 *   (2b) number of entries
 *   (4b) offset to string table from start of message
 *   (2b) number of class name strings
 *   (2b) number of method name strings
 *   (2b) number of source file name strings
 *   For each entry:
 *     (4b) total allocation size
 *     (2b) thread id
 *     (2b) allocated object's class name index
 *     (1b) stack depth
 *     For each stack frame:
 *       (2b) method's class name
 *       (2b) method name
 *       (2b) method source file
 *       (2b) line number, clipped to 32767; -2 if native; -1 if no source
 *   (xb) class name strings
 *   (xb) method name strings
 *   (xb) source file strings
 *
 * As with other DDM traffic, strings are sent as a 4-byte length
 * followed by UTF-16 data.
 *
 * We send up 16-bit unsigned indexes into string tables. In theory there
 * can be (kMaxAllocRecordStackDepth * alloc_record_max_) unique strings in
 * each table, but in practice there should be far fewer.
 *
 * The chief reason for using a string table here is to keep the size of
 * the DDMS message to a minimum. This is partly to make the protocol
 * efficient, but also because we have to form the whole thing up all at
 * once in a memory buffer.
 *
 * We use separate string tables for class names, method names, and source
 * files to keep the indexes small. There will generally be no overlap
 * between the contents of these tables.
 */
jbyteArray Dbg::GetRecentAllocations() {
  if ((false)) {
    DumpRecentAllocations();
  }

  Thread* self = Thread::Current();
  std::vector<uint8_t> bytes;
  {
    MutexLock mu(self, *Locks::alloc_tracker_lock_);
    gc::AllocRecordObjectMap* records = Runtime::Current()->GetHeap()->GetAllocationRecords();
    // In case this method is called when the allocation tracker is not enabled,
    // we should still send some data back.
    gc::AllocRecordObjectMap fallback_record_map;
    if (records == nullptr) {
      CHECK(!Runtime::Current()->GetHeap()->IsAllocTrackingEnabled());
      records = &fallback_record_map;
    }
    // We don't need to wait on the condition variable records->new_record_condition_, because
    // this function only reads the class objects, which are already marked, so it doesn't change
    // their reachability.

    //
    // Part 1: generate string tables.
    //
    StringTable class_names;
    StringTable method_names;
    StringTable filenames;

    VLOG(jdwp) << "Collecting StringTables.";

    const uint16_t capped_count = CappedAllocRecordCount(records->GetRecentAllocationSize());
    uint16_t count = capped_count;
    size_t alloc_byte_count = 0;
    for (auto it = records->RBegin(), end = records->REnd();
         count > 0 && it != end; count--, it++) {
      const gc::AllocRecord* record = &it->second;
      std::string temp;
      const char* class_descr = record->GetClassDescriptor(&temp);
      class_names.Add(class_descr, !temp.empty());

      // Size + tid + class name index + stack depth.
      alloc_byte_count += 4u + 2u + 2u + 1u;

      for (size_t i = 0, depth = record->GetDepth(); i < depth; i++) {
        ArtMethod* m = record->StackElement(i).GetMethod();
        class_names.Add(m->GetDeclaringClassDescriptor(), false);
        method_names.Add(m->GetName(), false);
        filenames.Add(GetMethodSourceFile(m), false);
      }

      // Depth * (class index + method name index + file name index + line number).
      alloc_byte_count += record->GetDepth() * (2u + 2u + 2u + 2u);
    }

    class_names.Finish();
    method_names.Finish();
    filenames.Finish();
    VLOG(jdwp) << "Done collecting StringTables:" << std::endl
               << "  ClassNames: " << class_names.Size() << std::endl
               << "  MethodNames: " << method_names.Size() << std::endl
               << "  Filenames: " << filenames.Size();

    LOG(INFO) << "recent allocation records: " << capped_count;
    LOG(INFO) << "allocation records all objects: " << records->Size();

    //
    // Part 2: Generate the output and store it in the buffer.
    //

    // (1b) message header len (to allow future expansion); includes itself
    // (1b) entry header len
    // (1b) stack frame len
    const int kMessageHeaderLen = 15;
    const int kEntryHeaderLen = 9;
    const int kStackFrameLen = 8;
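    // For reference, these constants follow from the layout documented above this function:
    // message header = 1 + 1 + 1 + 2 + 4 + 2 + 2 + 2 = 15 bytes, entry header = 4 + 2 + 2 + 1 = 9
    // bytes, and each stack frame = 2 + 2 + 2 + 2 = 8 bytes.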
    Append1BE(bytes, kMessageHeaderLen);
    Append1BE(bytes, kEntryHeaderLen);
    Append1BE(bytes, kStackFrameLen);

    // (2b) number of entries
    // (4b) offset to string table from start of message
    // (2b) number of class name strings
    // (2b) number of method name strings
    // (2b) number of source file name strings
    Append2BE(bytes, capped_count);
    size_t string_table_offset = bytes.size();
    Append4BE(bytes, 0);  // We'll patch this later...
    Append2BE(bytes, class_names.Size());
    Append2BE(bytes, method_names.Size());
    Append2BE(bytes, filenames.Size());

    VLOG(jdwp) << "Dumping allocations with stacks";

    // Enlarge the vector for the allocation data.
    size_t reserve_size = bytes.size() + alloc_byte_count;
    bytes.reserve(reserve_size);

    std::string temp;
    count = capped_count;
    // The last "count" number of allocation records in "records" are the most recent "count"
    // number of allocations. Reverse iterate to get them. The most recent allocation is sent
    // first.
    for (auto it = records->RBegin(), end = records->REnd();
         count > 0 && it != end; count--, it++) {
      // For each entry:
      // (4b) total allocation size
      // (2b) thread id
      // (2b) allocated object's class name index
      // (1b) stack depth
      const gc::AllocRecord* record = &it->second;
      size_t stack_depth = record->GetDepth();
      size_t allocated_object_class_name_index =
          class_names.IndexOf(record->GetClassDescriptor(&temp));
      Append4BE(bytes, record->ByteCount());
      Append2BE(bytes, static_cast<uint16_t>(record->GetTid()));
      Append2BE(bytes, allocated_object_class_name_index);
      Append1BE(bytes, stack_depth);

      for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) {
        // For each stack frame:
        // (2b) method's class name
        // (2b) method name
        // (2b) method source file
        // (2b) line number, clipped to 32767; -2 if native; -1 if no source
        ArtMethod* m = record->StackElement(stack_frame).GetMethod();
        size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor());
        size_t method_name_index = method_names.IndexOf(m->GetName());
        size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m));
        Append2BE(bytes, class_name_index);
        Append2BE(bytes, method_name_index);
        Append2BE(bytes, file_name_index);
        Append2BE(bytes, record->StackElement(stack_frame).ComputeLineNumber());
      }
    }

    CHECK_EQ(bytes.size(), reserve_size);
    VLOG(jdwp) << "Dumping tables.";

    // (xb) class name strings
    // (xb) method name strings
    // (xb) source file strings
    Set4BE(&bytes[string_table_offset], bytes.size());
    class_names.WriteTo(bytes);
    method_names.WriteTo(bytes);
    filenames.WriteTo(bytes);

    VLOG(jdwp) << "GetRecentAllocations: data created. " << bytes.size();
  }
  JNIEnv* env = self->GetJniEnv();
  jbyteArray result = env->NewByteArray(bytes.size());
  if (result != nullptr) {
    env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
  }
  return result;
}

void Dbg::DbgThreadLifecycleCallback::ThreadStart(Thread* self) {
  Dbg::PostThreadStart(self);
}

void Dbg::DbgThreadLifecycleCallback::ThreadDeath(Thread* self) {
  Dbg::PostThreadDeath(self);
}

}  // namespace art