1 /*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "debugger.h"
18
19 #include <sys/uio.h>
20
21 #include <set>
22
23 #include "arch/context.h"
24 #include "class_linker.h"
25 #include "class_linker-inl.h"
26 #include "dex_file-inl.h"
27 #include "dex_instruction.h"
28 #include "field_helper.h"
29 #include "gc/accounting/card_table-inl.h"
30 #include "gc/space/large_object_space.h"
31 #include "gc/space/space-inl.h"
32 #include "handle_scope.h"
33 #include "jdwp/object_registry.h"
34 #include "method_helper.h"
35 #include "mirror/art_field-inl.h"
36 #include "mirror/art_method-inl.h"
37 #include "mirror/class.h"
38 #include "mirror/class-inl.h"
39 #include "mirror/class_loader.h"
40 #include "mirror/object-inl.h"
41 #include "mirror/object_array-inl.h"
42 #include "mirror/string-inl.h"
43 #include "mirror/throwable.h"
44 #include "quick/inline_method_analyser.h"
45 #include "reflection.h"
46 #include "safe_map.h"
47 #include "scoped_thread_state_change.h"
48 #include "ScopedLocalRef.h"
49 #include "ScopedPrimitiveArray.h"
50 #include "handle_scope-inl.h"
51 #include "thread_list.h"
52 #include "throw_location.h"
53 #include "utf.h"
54 #include "verifier/method_verifier-inl.h"
55 #include "well_known_classes.h"
56
57 #ifdef HAVE_ANDROID_OS
58 #include "cutils/properties.h"
59 #endif
60
61 namespace art {
62
63 static const size_t kMaxAllocRecordStackDepth = 16; // Max 255.
64 static const size_t kDefaultNumAllocRecords = 64*1024; // Must be a power of 2. 2BE can hold 64k-1.
65
66 // Limit alloc_record_count to the 2BE value that is the limit of the current protocol.
CappedAllocRecordCount(size_t alloc_record_count)67 static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
68 if (alloc_record_count > 0xffff) {
69 return 0xffff;
70 }
71 return alloc_record_count;
72 }
73
74 class AllocRecordStackTraceElement {
75 public:
AllocRecordStackTraceElement()76 AllocRecordStackTraceElement() : method_(nullptr), dex_pc_(0) {
77 }
78
LineNumber()79 int32_t LineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
80 mirror::ArtMethod* method = Method();
81 DCHECK(method != nullptr);
82 return method->GetLineNumFromDexPC(DexPc());
83 }
84
Method()85 mirror::ArtMethod* Method() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
86 ScopedObjectAccessUnchecked soa(Thread::Current());
87 return soa.DecodeMethod(method_);
88 }
89
SetMethod(mirror::ArtMethod * m)90 void SetMethod(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
91 ScopedObjectAccessUnchecked soa(Thread::Current());
92 method_ = soa.EncodeMethod(m);
93 }
94
DexPc() const95 uint32_t DexPc() const {
96 return dex_pc_;
97 }
98
SetDexPc(uint32_t pc)99 void SetDexPc(uint32_t pc) {
100 dex_pc_ = pc;
101 }
102
103 private:
104 jmethodID method_;
105 uint32_t dex_pc_;
106 };
107
Add(mirror::Class * t)108 jobject Dbg::TypeCache::Add(mirror::Class* t) {
109 ScopedObjectAccessUnchecked soa(Thread::Current());
110 int32_t hash_code = t->IdentityHashCode();
111 auto range = objects_.equal_range(hash_code);
112 for (auto it = range.first; it != range.second; ++it) {
113 if (soa.Decode<mirror::Class*>(it->second) == t) {
114 // Found a matching weak global, return it.
115 return it->second;
116 }
117 }
118 JNIEnv* env = soa.Env();
119 const jobject local_ref = soa.AddLocalReference<jobject>(t);
120 const jobject weak_global = env->NewWeakGlobalRef(local_ref);
121 env->DeleteLocalRef(local_ref);
122 objects_.insert(std::make_pair(hash_code, weak_global));
123 return weak_global;
124 }
125
Clear()126 void Dbg::TypeCache::Clear() {
127 JavaVMExt* vm = Runtime::Current()->GetJavaVM();
128 Thread* self = Thread::Current();
129 for (const auto& p : objects_) {
130 vm->DeleteWeakGlobalRef(self, p.second);
131 }
132 objects_.clear();
133 }
134
135 class AllocRecord {
136 public:
AllocRecord()137 AllocRecord() : type_(nullptr), byte_count_(0), thin_lock_id_(0) {}
138
Type()139 mirror::Class* Type() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
140 return down_cast<mirror::Class*>(Thread::Current()->DecodeJObject(type_));
141 }
142
SetType(mirror::Class * t)143 void SetType(mirror::Class* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
144 Locks::alloc_tracker_lock_) {
145 type_ = Dbg::type_cache_.Add(t);
146 }
147
GetDepth()148 size_t GetDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
149 size_t depth = 0;
150 while (depth < kMaxAllocRecordStackDepth && stack_[depth].Method() != NULL) {
151 ++depth;
152 }
153 return depth;
154 }
155
ByteCount() const156 size_t ByteCount() const {
157 return byte_count_;
158 }
159
SetByteCount(size_t count)160 void SetByteCount(size_t count) {
161 byte_count_ = count;
162 }
163
ThinLockId() const164 uint16_t ThinLockId() const {
165 return thin_lock_id_;
166 }
167
SetThinLockId(uint16_t id)168 void SetThinLockId(uint16_t id) {
169 thin_lock_id_ = id;
170 }
171
StackElement(size_t index)172 AllocRecordStackTraceElement* StackElement(size_t index) {
173 DCHECK_LT(index, kMaxAllocRecordStackDepth);
174 return &stack_[index];
175 }
176
177 private:
178 jobject type_; // This is a weak global.
179 size_t byte_count_;
180 uint16_t thin_lock_id_;
181 AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth]; // Unused entries have NULL method.
182 };
183
184 class Breakpoint {
185 public:
Breakpoint(mirror::ArtMethod * method,uint32_t dex_pc,DeoptimizationRequest::Kind deoptimization_kind)186 Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc,
187 DeoptimizationRequest::Kind deoptimization_kind)
188 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
189 : method_(nullptr), dex_pc_(dex_pc), deoptimization_kind_(deoptimization_kind) {
190 CHECK(deoptimization_kind_ == DeoptimizationRequest::kNothing ||
191 deoptimization_kind_ == DeoptimizationRequest::kSelectiveDeoptimization ||
192 deoptimization_kind_ == DeoptimizationRequest::kFullDeoptimization);
193 ScopedObjectAccessUnchecked soa(Thread::Current());
194 method_ = soa.EncodeMethod(method);
195 }
196
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)197 Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
198 : method_(nullptr), dex_pc_(other.dex_pc_),
199 deoptimization_kind_(other.deoptimization_kind_) {
200 ScopedObjectAccessUnchecked soa(Thread::Current());
201 method_ = soa.EncodeMethod(other.Method());
202 }
203
Method() const204 mirror::ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
205 ScopedObjectAccessUnchecked soa(Thread::Current());
206 return soa.DecodeMethod(method_);
207 }
208
DexPc() const209 uint32_t DexPc() const {
210 return dex_pc_;
211 }
212
GetDeoptimizationKind() const213 DeoptimizationRequest::Kind GetDeoptimizationKind() const {
214 return deoptimization_kind_;
215 }
216
217 private:
218 // The location of this breakpoint.
219 jmethodID method_;
220 uint32_t dex_pc_;
221
222 // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
223 DeoptimizationRequest::Kind deoptimization_kind_;
224 };
225
operator <<(std::ostream & os,const Breakpoint & rhs)226 static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
227 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
228 os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
229 return os;
230 }
231
232 class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
233 public:
DebugInstrumentationListener()234 DebugInstrumentationListener() {}
~DebugInstrumentationListener()235 virtual ~DebugInstrumentationListener() {}
236
MethodEntered(Thread * thread,mirror::Object * this_object,mirror::ArtMethod * method,uint32_t dex_pc)237 void MethodEntered(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
238 uint32_t dex_pc)
239 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
240 if (method->IsNative()) {
241 // TODO: post location events is a suspension point and native method entry stubs aren't.
242 return;
243 }
244 Dbg::UpdateDebugger(thread, this_object, method, 0, Dbg::kMethodEntry, nullptr);
245 }
246
MethodExited(Thread * thread,mirror::Object * this_object,mirror::ArtMethod * method,uint32_t dex_pc,const JValue & return_value)247 void MethodExited(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
248 uint32_t dex_pc, const JValue& return_value)
249 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
250 if (method->IsNative()) {
251 // TODO: post location events is a suspension point and native method entry stubs aren't.
252 return;
253 }
254 Dbg::UpdateDebugger(thread, this_object, method, dex_pc, Dbg::kMethodExit, &return_value);
255 }
256
MethodUnwind(Thread * thread,mirror::Object * this_object,mirror::ArtMethod * method,uint32_t dex_pc)257 void MethodUnwind(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
258 uint32_t dex_pc)
259 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
260 // We're not recorded to listen to this kind of event, so complain.
261 LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
262 << " " << dex_pc;
263 }
264
DexPcMoved(Thread * thread,mirror::Object * this_object,mirror::ArtMethod * method,uint32_t new_dex_pc)265 void DexPcMoved(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
266 uint32_t new_dex_pc)
267 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
268 Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc, 0, nullptr);
269 }
270
FieldRead(Thread * thread,mirror::Object * this_object,mirror::ArtMethod * method,uint32_t dex_pc,mirror::ArtField * field)271 void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
272 uint32_t dex_pc, mirror::ArtField* field)
273 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
274 Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
275 }
276
FieldWritten(Thread * thread,mirror::Object * this_object,mirror::ArtMethod * method,uint32_t dex_pc,mirror::ArtField * field,const JValue & field_value)277 void FieldWritten(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
278 uint32_t dex_pc, mirror::ArtField* field, const JValue& field_value)
279 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
280 Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
281 }
282
ExceptionCaught(Thread * thread,const ThrowLocation & throw_location,mirror::ArtMethod * catch_method,uint32_t catch_dex_pc,mirror::Throwable * exception_object)283 void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
284 mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
285 mirror::Throwable* exception_object)
286 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
287 Dbg::PostException(throw_location, catch_method, catch_dex_pc, exception_object);
288 }
289
290 private:
291 DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
292 } gDebugInstrumentationListener;
293
294 // JDWP is allowed unless the Zygote forbids it.
295 static bool gJdwpAllowed = true;
296
297 // Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line?
298 static bool gJdwpConfigured = false;
299
300 // Broken-down JDWP options. (Only valid if IsJdwpConfigured() is true.)
301 static JDWP::JdwpOptions gJdwpOptions;
302
303 // Runtime JDWP state.
304 static JDWP::JdwpState* gJdwpState = NULL;
305 static bool gDebuggerConnected; // debugger or DDMS is connected.
306 static bool gDebuggerActive; // debugger is making requests.
307 static bool gDisposed; // debugger called VirtualMachine.Dispose, so we should drop the connection.
308
309 static bool gDdmThreadNotification = false;
310
311 // DDMS GC-related settings.
312 static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
313 static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
314 static Dbg::HpsgWhat gDdmHpsgWhat;
315 static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
316 static Dbg::HpsgWhat gDdmNhsgWhat;
317
318 ObjectRegistry* Dbg::gRegistry = nullptr;
319
320 // Recent allocation tracking.
321 AllocRecord* Dbg::recent_allocation_records_ = nullptr; // TODO: CircularBuffer<AllocRecord>
322 size_t Dbg::alloc_record_max_ = 0;
323 size_t Dbg::alloc_record_head_ = 0;
324 size_t Dbg::alloc_record_count_ = 0;
325 Dbg::TypeCache Dbg::type_cache_;
326
327 // Deoptimization support.
328 std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
329 size_t Dbg::full_deoptimization_event_count_ = 0;
330 size_t Dbg::delayed_full_undeoptimization_count_ = 0;
331
332 // Instrumentation event reference counters.
333 size_t Dbg::dex_pc_change_event_ref_count_ = 0;
334 size_t Dbg::method_enter_event_ref_count_ = 0;
335 size_t Dbg::method_exit_event_ref_count_ = 0;
336 size_t Dbg::field_read_event_ref_count_ = 0;
337 size_t Dbg::field_write_event_ref_count_ = 0;
338 size_t Dbg::exception_catch_event_ref_count_ = 0;
339 uint32_t Dbg::instrumentation_events_ = 0;
340
341 // Breakpoints.
342 static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
343
VisitRoots(RootCallback * callback,void * arg,const RootInfo & root_info)344 void DebugInvokeReq::VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info) {
345 if (receiver != nullptr) {
346 callback(&receiver, arg, root_info);
347 }
348 if (thread != nullptr) {
349 callback(&thread, arg, root_info);
350 }
351 if (klass != nullptr) {
352 callback(reinterpret_cast<mirror::Object**>(&klass), arg, root_info);
353 }
354 if (method != nullptr) {
355 callback(reinterpret_cast<mirror::Object**>(&method), arg, root_info);
356 }
357 }
358
Clear()359 void DebugInvokeReq::Clear() {
360 invoke_needed = false;
361 receiver = nullptr;
362 thread = nullptr;
363 klass = nullptr;
364 method = nullptr;
365 }
366
VisitRoots(RootCallback * callback,void * arg,const RootInfo & root_info)367 void SingleStepControl::VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info) {
368 if (method != nullptr) {
369 callback(reinterpret_cast<mirror::Object**>(&method), arg, root_info);
370 }
371 }
372
ContainsDexPc(uint32_t dex_pc) const373 bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const {
374 return dex_pcs.find(dex_pc) == dex_pcs.end();
375 }
376
Clear()377 void SingleStepControl::Clear() {
378 is_active = false;
379 method = nullptr;
380 dex_pcs.clear();
381 }
382
IsBreakpoint(const mirror::ArtMethod * m,uint32_t dex_pc)383 static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc)
384 LOCKS_EXCLUDED(Locks::breakpoint_lock_)
385 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
386 ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
387 for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
388 if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) {
389 VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
390 return true;
391 }
392 }
393 return false;
394 }
395
IsSuspendedForDebugger(ScopedObjectAccessUnchecked & soa,Thread * thread)396 static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
397 LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) {
398 MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
399 // A thread may be suspended for GC; in this code, we really want to know whether
400 // there's a debugger suspension active.
401 return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0;
402 }
403
DecodeArray(JDWP::RefTypeId id,JDWP::JdwpError & status)404 static mirror::Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status)
405 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
406 mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id);
407 if (o == NULL || o == ObjectRegistry::kInvalidObject) {
408 status = JDWP::ERR_INVALID_OBJECT;
409 return NULL;
410 }
411 if (!o->IsArrayInstance()) {
412 status = JDWP::ERR_INVALID_ARRAY;
413 return NULL;
414 }
415 status = JDWP::ERR_NONE;
416 return o->AsArray();
417 }
418
DecodeClass(JDWP::RefTypeId id,JDWP::JdwpError & status)419 static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status)
420 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
421 mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id);
422 if (o == NULL || o == ObjectRegistry::kInvalidObject) {
423 status = JDWP::ERR_INVALID_OBJECT;
424 return NULL;
425 }
426 if (!o->IsClass()) {
427 status = JDWP::ERR_INVALID_CLASS;
428 return NULL;
429 }
430 status = JDWP::ERR_NONE;
431 return o->AsClass();
432 }
433
DecodeThread(ScopedObjectAccessUnchecked & soa,JDWP::ObjectId thread_id,Thread * & thread)434 static JDWP::JdwpError DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id, Thread*& thread)
435 EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
436 LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
437 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
438 mirror::Object* thread_peer = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id);
439 if (thread_peer == NULL || thread_peer == ObjectRegistry::kInvalidObject) {
440 // This isn't even an object.
441 return JDWP::ERR_INVALID_OBJECT;
442 }
443
444 mirror::Class* java_lang_Thread = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
445 if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) {
446 // This isn't a thread.
447 return JDWP::ERR_INVALID_THREAD;
448 }
449
450 thread = Thread::FromManagedThread(soa, thread_peer);
451 if (thread == NULL) {
452 // This is a java.lang.Thread without a Thread*. Must be a zombie.
453 return JDWP::ERR_THREAD_NOT_ALIVE;
454 }
455 return JDWP::ERR_NONE;
456 }
457
BasicTagFromDescriptor(const char * descriptor)458 static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) {
459 // JDWP deliberately uses the descriptor characters' ASCII values for its enum.
460 // Note that by "basic" we mean that we don't get more specific than JT_OBJECT.
461 return static_cast<JDWP::JdwpTag>(descriptor[0]);
462 }
463
BasicTagFromClass(mirror::Class * klass)464 static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
465 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
466 std::string temp;
467 const char* descriptor = klass->GetDescriptor(&temp);
468 return BasicTagFromDescriptor(descriptor);
469 }
470
TagFromClass(const ScopedObjectAccessUnchecked & soa,mirror::Class * c)471 static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
472 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
473 CHECK(c != NULL);
474 if (c->IsArrayClass()) {
475 return JDWP::JT_ARRAY;
476 }
477 if (c->IsStringClass()) {
478 return JDWP::JT_STRING;
479 }
480 if (c->IsClassClass()) {
481 return JDWP::JT_CLASS_OBJECT;
482 }
483 {
484 mirror::Class* thread_class = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
485 if (thread_class->IsAssignableFrom(c)) {
486 return JDWP::JT_THREAD;
487 }
488 }
489 {
490 mirror::Class* thread_group_class =
491 soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
492 if (thread_group_class->IsAssignableFrom(c)) {
493 return JDWP::JT_THREAD_GROUP;
494 }
495 }
496 {
497 mirror::Class* class_loader_class =
498 soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader);
499 if (class_loader_class->IsAssignableFrom(c)) {
500 return JDWP::JT_CLASS_LOADER;
501 }
502 }
503 return JDWP::JT_OBJECT;
504 }
505
506 /*
507 * Objects declared to hold Object might actually hold a more specific
508 * type. The debugger may take a special interest in these (e.g. it
509 * wants to display the contents of Strings), so we want to return an
510 * appropriate tag.
511 *
512 * Null objects are tagged JT_OBJECT.
513 */
TagFromObject(const ScopedObjectAccessUnchecked & soa,mirror::Object * o)514 JDWP::JdwpTag Dbg::TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o) {
515 return (o == NULL) ? JDWP::JT_OBJECT : TagFromClass(soa, o->GetClass());
516 }
517
IsPrimitiveTag(JDWP::JdwpTag tag)518 static bool IsPrimitiveTag(JDWP::JdwpTag tag) {
519 switch (tag) {
520 case JDWP::JT_BOOLEAN:
521 case JDWP::JT_BYTE:
522 case JDWP::JT_CHAR:
523 case JDWP::JT_FLOAT:
524 case JDWP::JT_DOUBLE:
525 case JDWP::JT_INT:
526 case JDWP::JT_LONG:
527 case JDWP::JT_SHORT:
528 case JDWP::JT_VOID:
529 return true;
530 default:
531 return false;
532 }
533 }
534
535 /*
536 * Handle one of the JDWP name/value pairs.
537 *
538 * JDWP options are:
539 * help: if specified, show help message and bail
540 * transport: may be dt_socket or dt_shmem
541 * address: for dt_socket, "host:port", or just "port" when listening
542 * server: if "y", wait for debugger to attach; if "n", attach to debugger
543 * timeout: how long to wait for debugger to connect / listen
544 *
545 * Useful with server=n (these aren't supported yet):
546 * onthrow=<exception-name>: connect to debugger when exception thrown
547 * onuncaught=y|n: connect to debugger when uncaught exception thrown
548 * launch=<command-line>: launch the debugger itself
549 *
550 * The "transport" option is required, as is "address" if server=n.
551 */
ParseJdwpOption(const std::string & name,const std::string & value)552 static bool ParseJdwpOption(const std::string& name, const std::string& value) {
553 if (name == "transport") {
554 if (value == "dt_socket") {
555 gJdwpOptions.transport = JDWP::kJdwpTransportSocket;
556 } else if (value == "dt_android_adb") {
557 gJdwpOptions.transport = JDWP::kJdwpTransportAndroidAdb;
558 } else {
559 LOG(ERROR) << "JDWP transport not supported: " << value;
560 return false;
561 }
562 } else if (name == "server") {
563 if (value == "n") {
564 gJdwpOptions.server = false;
565 } else if (value == "y") {
566 gJdwpOptions.server = true;
567 } else {
568 LOG(ERROR) << "JDWP option 'server' must be 'y' or 'n'";
569 return false;
570 }
571 } else if (name == "suspend") {
572 if (value == "n") {
573 gJdwpOptions.suspend = false;
574 } else if (value == "y") {
575 gJdwpOptions.suspend = true;
576 } else {
577 LOG(ERROR) << "JDWP option 'suspend' must be 'y' or 'n'";
578 return false;
579 }
580 } else if (name == "address") {
581 /* this is either <port> or <host>:<port> */
582 std::string port_string;
583 gJdwpOptions.host.clear();
584 std::string::size_type colon = value.find(':');
585 if (colon != std::string::npos) {
586 gJdwpOptions.host = value.substr(0, colon);
587 port_string = value.substr(colon + 1);
588 } else {
589 port_string = value;
590 }
591 if (port_string.empty()) {
592 LOG(ERROR) << "JDWP address missing port: " << value;
593 return false;
594 }
595 char* end;
596 uint64_t port = strtoul(port_string.c_str(), &end, 10);
597 if (*end != '\0' || port > 0xffff) {
598 LOG(ERROR) << "JDWP address has junk in port field: " << value;
599 return false;
600 }
601 gJdwpOptions.port = port;
602 } else if (name == "launch" || name == "onthrow" || name == "oncaught" || name == "timeout") {
603 /* valid but unsupported */
604 LOG(INFO) << "Ignoring JDWP option '" << name << "'='" << value << "'";
605 } else {
606 LOG(INFO) << "Ignoring unrecognized JDWP option '" << name << "'='" << value << "'";
607 }
608
609 return true;
610 }
611
612 /*
613 * Parse the latter half of a -Xrunjdwp/-agentlib:jdwp= string, e.g.:
614 * "transport=dt_socket,address=8000,server=y,suspend=n"
615 */
ParseJdwpOptions(const std::string & options)616 bool Dbg::ParseJdwpOptions(const std::string& options) {
617 VLOG(jdwp) << "ParseJdwpOptions: " << options;
618
619 std::vector<std::string> pairs;
620 Split(options, ',', pairs);
621
622 for (size_t i = 0; i < pairs.size(); ++i) {
623 std::string::size_type equals = pairs[i].find('=');
624 if (equals == std::string::npos) {
625 LOG(ERROR) << "Can't parse JDWP option '" << pairs[i] << "' in '" << options << "'";
626 return false;
627 }
628 ParseJdwpOption(pairs[i].substr(0, equals), pairs[i].substr(equals + 1));
629 }
630
631 if (gJdwpOptions.transport == JDWP::kJdwpTransportUnknown) {
632 LOG(ERROR) << "Must specify JDWP transport: " << options;
633 }
634 if (!gJdwpOptions.server && (gJdwpOptions.host.empty() || gJdwpOptions.port == 0)) {
635 LOG(ERROR) << "Must specify JDWP host and port when server=n: " << options;
636 return false;
637 }
638
639 gJdwpConfigured = true;
640 return true;
641 }
642
StartJdwp()643 void Dbg::StartJdwp() {
644 if (!gJdwpAllowed || !IsJdwpConfigured()) {
645 // No JDWP for you!
646 return;
647 }
648
649 CHECK(gRegistry == nullptr);
650 gRegistry = new ObjectRegistry;
651
652 // Init JDWP if the debugger is enabled. This may connect out to a
653 // debugger, passively listen for a debugger, or block waiting for a
654 // debugger.
655 gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions);
656 if (gJdwpState == NULL) {
657 // We probably failed because some other process has the port already, which means that
658 // if we don't abort the user is likely to think they're talking to us when they're actually
659 // talking to that other process.
660 LOG(FATAL) << "Debugger thread failed to initialize";
661 }
662
663 // If a debugger has already attached, send the "welcome" message.
664 // This may cause us to suspend all threads.
665 if (gJdwpState->IsActive()) {
666 ScopedObjectAccess soa(Thread::Current());
667 if (!gJdwpState->PostVMStart()) {
668 LOG(WARNING) << "Failed to post 'start' message to debugger";
669 }
670 }
671 }
672
StopJdwp()673 void Dbg::StopJdwp() {
674 // Post VM_DEATH event before the JDWP connection is closed (either by the JDWP thread or the
675 // destruction of gJdwpState).
676 if (gJdwpState != nullptr && gJdwpState->IsActive()) {
677 gJdwpState->PostVMDeath();
678 }
679 // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
680 Disposed();
681 delete gJdwpState;
682 gJdwpState = nullptr;
683 delete gRegistry;
684 gRegistry = nullptr;
685 }
686
GcDidFinish()687 void Dbg::GcDidFinish() {
688 if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
689 ScopedObjectAccess soa(Thread::Current());
690 VLOG(jdwp) << "Sending heap info to DDM";
691 DdmSendHeapInfo(gDdmHpifWhen);
692 }
693 if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
694 ScopedObjectAccess soa(Thread::Current());
695 VLOG(jdwp) << "Dumping heap to DDM";
696 DdmSendHeapSegments(false);
697 }
698 if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
699 ScopedObjectAccess soa(Thread::Current());
700 VLOG(jdwp) << "Dumping native heap to DDM";
701 DdmSendHeapSegments(true);
702 }
703 }
704
SetJdwpAllowed(bool allowed)705 void Dbg::SetJdwpAllowed(bool allowed) {
706 gJdwpAllowed = allowed;
707 }
708
GetInvokeReq()709 DebugInvokeReq* Dbg::GetInvokeReq() {
710 return Thread::Current()->GetInvokeReq();
711 }
712
GetDebugThread()713 Thread* Dbg::GetDebugThread() {
714 return (gJdwpState != NULL) ? gJdwpState->GetDebugThread() : NULL;
715 }
716
ClearWaitForEventThread()717 void Dbg::ClearWaitForEventThread() {
718 gJdwpState->ClearWaitForEventThread();
719 }
720
Connected()721 void Dbg::Connected() {
722 CHECK(!gDebuggerConnected);
723 VLOG(jdwp) << "JDWP has attached";
724 gDebuggerConnected = true;
725 gDisposed = false;
726 }
727
Disposed()728 void Dbg::Disposed() {
729 gDisposed = true;
730 }
731
IsDisposed()732 bool Dbg::IsDisposed() {
733 return gDisposed;
734 }
735
RequiresDeoptimization()736 bool Dbg::RequiresDeoptimization() {
737 // We don't need deoptimization if everything runs with interpreter after
738 // enabling -Xint mode.
739 return !Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly();
740 }
741
GoActive()742 void Dbg::GoActive() {
743 // Enable all debugging features, including scans for breakpoints.
744 // This is a no-op if we're already active.
745 // Only called from the JDWP handler thread.
746 if (gDebuggerActive) {
747 return;
748 }
749
750 {
751 // TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected?
752 ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
753 CHECK_EQ(gBreakpoints.size(), 0U);
754 }
755
756 {
757 MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
758 CHECK_EQ(deoptimization_requests_.size(), 0U);
759 CHECK_EQ(full_deoptimization_event_count_, 0U);
760 CHECK_EQ(delayed_full_undeoptimization_count_, 0U);
761 CHECK_EQ(dex_pc_change_event_ref_count_, 0U);
762 CHECK_EQ(method_enter_event_ref_count_, 0U);
763 CHECK_EQ(method_exit_event_ref_count_, 0U);
764 CHECK_EQ(field_read_event_ref_count_, 0U);
765 CHECK_EQ(field_write_event_ref_count_, 0U);
766 CHECK_EQ(exception_catch_event_ref_count_, 0U);
767 }
768
769 Runtime* runtime = Runtime::Current();
770 runtime->GetThreadList()->SuspendAll();
771 Thread* self = Thread::Current();
772 ThreadState old_state = self->SetStateUnsafe(kRunnable);
773 CHECK_NE(old_state, kRunnable);
774 if (RequiresDeoptimization()) {
775 runtime->GetInstrumentation()->EnableDeoptimization();
776 }
777 instrumentation_events_ = 0;
778 gDebuggerActive = true;
779 CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
780 runtime->GetThreadList()->ResumeAll();
781
782 LOG(INFO) << "Debugger is active";
783 }
784
Disconnected()785 void Dbg::Disconnected() {
786 CHECK(gDebuggerConnected);
787
788 LOG(INFO) << "Debugger is no longer active";
789
790 // Suspend all threads and exclusively acquire the mutator lock. Set the state of the thread
791 // to kRunnable to avoid scoped object access transitions. Remove the debugger as a listener
792 // and clear the object registry.
793 Runtime* runtime = Runtime::Current();
794 runtime->GetThreadList()->SuspendAll();
795 Thread* self = Thread::Current();
796 ThreadState old_state = self->SetStateUnsafe(kRunnable);
797
798 // Debugger may not be active at this point.
799 if (gDebuggerActive) {
800 {
801 // Since we're going to disable deoptimization, we clear the deoptimization requests queue.
802 // This prevents us from having any pending deoptimization request when the debugger attaches
803 // to us again while no event has been requested yet.
804 MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
805 deoptimization_requests_.clear();
806 full_deoptimization_event_count_ = 0U;
807 delayed_full_undeoptimization_count_ = 0U;
808 }
809 if (instrumentation_events_ != 0) {
810 runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
811 instrumentation_events_);
812 instrumentation_events_ = 0;
813 }
814 if (RequiresDeoptimization()) {
815 runtime->GetInstrumentation()->DisableDeoptimization();
816 }
817 gDebuggerActive = false;
818 }
819 gRegistry->Clear();
820 gDebuggerConnected = false;
821 CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
822 runtime->GetThreadList()->ResumeAll();
823 }
824
IsDebuggerActive()825 bool Dbg::IsDebuggerActive() {
826 return gDebuggerActive;
827 }
828
IsJdwpConfigured()829 bool Dbg::IsJdwpConfigured() {
830 return gJdwpConfigured;
831 }
832
LastDebuggerActivity()833 int64_t Dbg::LastDebuggerActivity() {
834 return gJdwpState->LastDebuggerActivity();
835 }
836
UndoDebuggerSuspensions()837 void Dbg::UndoDebuggerSuspensions() {
838 Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
839 }
840
GetClassName(JDWP::RefTypeId class_id)841 std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
842 mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id);
843 if (o == NULL) {
844 return "NULL";
845 }
846 if (o == ObjectRegistry::kInvalidObject) {
847 return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
848 }
849 if (!o->IsClass()) {
850 return StringPrintf("non-class %p", o); // This is only used for debugging output anyway.
851 }
852 return GetClassName(o->AsClass());
853 }
854
GetClassName(mirror::Class * klass)855 std::string Dbg::GetClassName(mirror::Class* klass) {
856 if (klass == nullptr) {
857 return "NULL";
858 }
859 std::string temp;
860 return DescriptorToName(klass->GetDescriptor(&temp));
861 }
862
GetClassObject(JDWP::RefTypeId id,JDWP::ObjectId & class_object_id)863 JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId& class_object_id) {
864 JDWP::JdwpError status;
865 mirror::Class* c = DecodeClass(id, status);
866 if (c == NULL) {
867 return status;
868 }
869 class_object_id = gRegistry->Add(c);
870 return JDWP::ERR_NONE;
871 }
872
GetSuperclass(JDWP::RefTypeId id,JDWP::RefTypeId & superclass_id)873 JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId& superclass_id) {
874 JDWP::JdwpError status;
875 mirror::Class* c = DecodeClass(id, status);
876 if (c == NULL) {
877 return status;
878 }
879 if (c->IsInterface()) {
880 // http://code.google.com/p/android/issues/detail?id=20856
881 superclass_id = 0;
882 } else {
883 superclass_id = gRegistry->Add(c->GetSuperClass());
884 }
885 return JDWP::ERR_NONE;
886 }
887
GetClassLoader(JDWP::RefTypeId id,JDWP::ExpandBuf * pReply)888 JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
889 mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
890 if (o == NULL || o == ObjectRegistry::kInvalidObject) {
891 return JDWP::ERR_INVALID_OBJECT;
892 }
893 expandBufAddObjectId(pReply, gRegistry->Add(o->GetClass()->GetClassLoader()));
894 return JDWP::ERR_NONE;
895 }
896
GetModifiers(JDWP::RefTypeId id,JDWP::ExpandBuf * pReply)897 JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
898 JDWP::JdwpError status;
899 mirror::Class* c = DecodeClass(id, status);
900 if (c == NULL) {
901 return status;
902 }
903
904 uint32_t access_flags = c->GetAccessFlags() & kAccJavaFlagsMask;
905
906 // Set ACC_SUPER. Dex files don't contain this flag but only classes are supposed to have it set,
907 // not interfaces.
908 // Class.getModifiers doesn't return it, but JDWP does, so we set it here.
909 if ((access_flags & kAccInterface) == 0) {
910 access_flags |= kAccSuper;
911 }
912
913 expandBufAdd4BE(pReply, access_flags);
914
915 return JDWP::ERR_NONE;
916 }
917
GetMonitorInfo(JDWP::ObjectId object_id,JDWP::ExpandBuf * reply)918 JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply)
919 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
920 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
921 if (o == NULL || o == ObjectRegistry::kInvalidObject) {
922 return JDWP::ERR_INVALID_OBJECT;
923 }
924
925 // Ensure all threads are suspended while we read objects' lock words.
926 Thread* self = Thread::Current();
927 CHECK_EQ(self->GetState(), kRunnable);
928 self->TransitionFromRunnableToSuspended(kSuspended);
929 Runtime::Current()->GetThreadList()->SuspendAll();
930
931 MonitorInfo monitor_info(o);
932
933 Runtime::Current()->GetThreadList()->ResumeAll();
934 self->TransitionFromSuspendedToRunnable();
935
936 if (monitor_info.owner_ != NULL) {
937 expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeer()));
938 } else {
939 expandBufAddObjectId(reply, gRegistry->Add(NULL));
940 }
941 expandBufAdd4BE(reply, monitor_info.entry_count_);
942 expandBufAdd4BE(reply, monitor_info.waiters_.size());
943 for (size_t i = 0; i < monitor_info.waiters_.size(); ++i) {
944 expandBufAddObjectId(reply, gRegistry->Add(monitor_info.waiters_[i]->GetPeer()));
945 }
946 return JDWP::ERR_NONE;
947 }
948
GetOwnedMonitors(JDWP::ObjectId thread_id,std::vector<JDWP::ObjectId> & monitors,std::vector<uint32_t> & stack_depths)949 JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
950 std::vector<JDWP::ObjectId>& monitors,
951 std::vector<uint32_t>& stack_depths) {
952 struct OwnedMonitorVisitor : public StackVisitor {
953 OwnedMonitorVisitor(Thread* thread, Context* context,
954 std::vector<JDWP::ObjectId>* monitor_vector,
955 std::vector<uint32_t>* stack_depth_vector)
956 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
957 : StackVisitor(thread, context), current_stack_depth(0),
958 monitors(monitor_vector), stack_depths(stack_depth_vector) {}
959
960 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
961 // annotalysis.
962 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
963 if (!GetMethod()->IsRuntimeMethod()) {
964 Monitor::VisitLocks(this, AppendOwnedMonitors, this);
965 ++current_stack_depth;
966 }
967 return true;
968 }
969
970 static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg)
971 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
972 OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
973 visitor->monitors->push_back(gRegistry->Add(owned_monitor));
974 visitor->stack_depths->push_back(visitor->current_stack_depth);
975 }
976
977 size_t current_stack_depth;
978 std::vector<JDWP::ObjectId>* monitors;
979 std::vector<uint32_t>* stack_depths;
980 };
981
982 ScopedObjectAccessUnchecked soa(Thread::Current());
983 Thread* thread;
984 {
985 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
986 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
987 if (error != JDWP::ERR_NONE) {
988 return error;
989 }
990 if (!IsSuspendedForDebugger(soa, thread)) {
991 return JDWP::ERR_THREAD_NOT_SUSPENDED;
992 }
993 }
994 std::unique_ptr<Context> context(Context::Create());
995 OwnedMonitorVisitor visitor(thread, context.get(), &monitors, &stack_depths);
996 visitor.WalkStack();
997 return JDWP::ERR_NONE;
998 }
999
GetContendedMonitor(JDWP::ObjectId thread_id,JDWP::ObjectId & contended_monitor)1000 JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id,
1001 JDWP::ObjectId& contended_monitor) {
1002 mirror::Object* contended_monitor_obj;
1003 ScopedObjectAccessUnchecked soa(Thread::Current());
1004 {
1005 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1006 Thread* thread;
1007 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
1008 if (error != JDWP::ERR_NONE) {
1009 return error;
1010 }
1011 if (!IsSuspendedForDebugger(soa, thread)) {
1012 return JDWP::ERR_THREAD_NOT_SUSPENDED;
1013 }
1014 contended_monitor_obj = Monitor::GetContendedMonitor(thread);
1015 }
1016 // Add() requires the thread_list_lock_ not held to avoid the lock
1017 // level violation.
1018 contended_monitor = gRegistry->Add(contended_monitor_obj);
1019 return JDWP::ERR_NONE;
1020 }
1021
GetInstanceCounts(const std::vector<JDWP::RefTypeId> & class_ids,std::vector<uint64_t> & counts)1022 JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
1023 std::vector<uint64_t>& counts)
1024 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1025 gc::Heap* heap = Runtime::Current()->GetHeap();
1026 heap->CollectGarbage(false);
1027 std::vector<mirror::Class*> classes;
1028 counts.clear();
1029 for (size_t i = 0; i < class_ids.size(); ++i) {
1030 JDWP::JdwpError status;
1031 mirror::Class* c = DecodeClass(class_ids[i], status);
1032 if (c == NULL) {
1033 return status;
1034 }
1035 classes.push_back(c);
1036 counts.push_back(0);
1037 }
1038 heap->CountInstances(classes, false, &counts[0]);
1039 return JDWP::ERR_NONE;
1040 }
1041
GetInstances(JDWP::RefTypeId class_id,int32_t max_count,std::vector<JDWP::ObjectId> & instances)1042 JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, std::vector<JDWP::ObjectId>& instances)
1043 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1044 gc::Heap* heap = Runtime::Current()->GetHeap();
1045 // We only want reachable instances, so do a GC.
1046 heap->CollectGarbage(false);
1047 JDWP::JdwpError status;
1048 mirror::Class* c = DecodeClass(class_id, status);
1049 if (c == nullptr) {
1050 return status;
1051 }
1052 std::vector<mirror::Object*> raw_instances;
1053 Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances);
1054 for (size_t i = 0; i < raw_instances.size(); ++i) {
1055 instances.push_back(gRegistry->Add(raw_instances[i]));
1056 }
1057 return JDWP::ERR_NONE;
1058 }
1059
GetReferringObjects(JDWP::ObjectId object_id,int32_t max_count,std::vector<JDWP::ObjectId> & referring_objects)1060 JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
1061 std::vector<JDWP::ObjectId>& referring_objects)
1062 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1063 gc::Heap* heap = Runtime::Current()->GetHeap();
1064 heap->CollectGarbage(false);
1065 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1066 if (o == NULL || o == ObjectRegistry::kInvalidObject) {
1067 return JDWP::ERR_INVALID_OBJECT;
1068 }
1069 std::vector<mirror::Object*> raw_instances;
1070 heap->GetReferringObjects(o, max_count, raw_instances);
1071 for (size_t i = 0; i < raw_instances.size(); ++i) {
1072 referring_objects.push_back(gRegistry->Add(raw_instances[i]));
1073 }
1074 return JDWP::ERR_NONE;
1075 }
1076
DisableCollection(JDWP::ObjectId object_id)1077 JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id)
1078 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1079 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1080 if (o == NULL || o == ObjectRegistry::kInvalidObject) {
1081 return JDWP::ERR_INVALID_OBJECT;
1082 }
1083 gRegistry->DisableCollection(object_id);
1084 return JDWP::ERR_NONE;
1085 }
1086
EnableCollection(JDWP::ObjectId object_id)1087 JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id)
1088 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1089 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1090 // Unlike DisableCollection, JDWP specs do not state an invalid object causes an error. The RI
1091 // also ignores these cases and never return an error. However it's not obvious why this command
1092 // should behave differently from DisableCollection and IsCollected commands. So let's be more
1093 // strict and return an error if this happens.
1094 if (o == NULL || o == ObjectRegistry::kInvalidObject) {
1095 return JDWP::ERR_INVALID_OBJECT;
1096 }
1097 gRegistry->EnableCollection(object_id);
1098 return JDWP::ERR_NONE;
1099 }
1100
IsCollected(JDWP::ObjectId object_id,bool & is_collected)1101 JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool& is_collected)
1102 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1103 if (object_id == 0) {
1104 // Null object id is invalid.
1105 return JDWP::ERR_INVALID_OBJECT;
1106 }
1107 // JDWP specs state an INVALID_OBJECT error is returned if the object ID is not valid. However
1108 // the RI seems to ignore this and assume object has been collected.
1109 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1110 if (o == NULL || o == ObjectRegistry::kInvalidObject) {
1111 is_collected = true;
1112 } else {
1113 is_collected = gRegistry->IsCollected(object_id);
1114 }
1115 return JDWP::ERR_NONE;
1116 }
1117
DisposeObject(JDWP::ObjectId object_id,uint32_t reference_count)1118 void Dbg::DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count)
1119 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1120 gRegistry->DisposeObject(object_id, reference_count);
1121 }
1122
GetTypeTag(mirror::Class * klass)1123 JDWP::JdwpTypeTag Dbg::GetTypeTag(mirror::Class* klass) {
1124 DCHECK(klass != nullptr);
1125 if (klass->IsArrayClass()) {
1126 return JDWP::TT_ARRAY;
1127 } else if (klass->IsInterface()) {
1128 return JDWP::TT_INTERFACE;
1129 } else {
1130 return JDWP::TT_CLASS;
1131 }
1132 }
1133
GetReflectedType(JDWP::RefTypeId class_id,JDWP::ExpandBuf * pReply)1134 JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
1135 JDWP::JdwpError status;
1136 mirror::Class* c = DecodeClass(class_id, status);
1137 if (c == NULL) {
1138 return status;
1139 }
1140
1141 JDWP::JdwpTypeTag type_tag = GetTypeTag(c);
1142 expandBufAdd1(pReply, type_tag);
1143 expandBufAddRefTypeId(pReply, class_id);
1144 return JDWP::ERR_NONE;
1145 }
1146
GetClassList(std::vector<JDWP::RefTypeId> & classes)1147 void Dbg::GetClassList(std::vector<JDWP::RefTypeId>& classes) {
1148 // Get the complete list of reference classes (i.e. all classes except
1149 // the primitive types).
1150 // Returns a newly-allocated buffer full of RefTypeId values.
1151 struct ClassListCreator {
1152 explicit ClassListCreator(std::vector<JDWP::RefTypeId>& classes) : classes(classes) {
1153 }
1154
1155 static bool Visit(mirror::Class* c, void* arg) {
1156 return reinterpret_cast<ClassListCreator*>(arg)->Visit(c);
1157 }
1158
1159 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
1160 // annotalysis.
1161 bool Visit(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS {
1162 if (!c->IsPrimitive()) {
1163 classes.push_back(gRegistry->AddRefType(c));
1164 }
1165 return true;
1166 }
1167
1168 std::vector<JDWP::RefTypeId>& classes;
1169 };
1170
1171 ClassListCreator clc(classes);
1172 Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(ClassListCreator::Visit,
1173 &clc);
1174 }
1175
GetClassInfo(JDWP::RefTypeId class_id,JDWP::JdwpTypeTag * pTypeTag,uint32_t * pStatus,std::string * pDescriptor)1176 JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
1177 uint32_t* pStatus, std::string* pDescriptor) {
1178 JDWP::JdwpError status;
1179 mirror::Class* c = DecodeClass(class_id, status);
1180 if (c == NULL) {
1181 return status;
1182 }
1183
1184 if (c->IsArrayClass()) {
1185 *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
1186 *pTypeTag = JDWP::TT_ARRAY;
1187 } else {
1188 if (c->IsErroneous()) {
1189 *pStatus = JDWP::CS_ERROR;
1190 } else {
1191 *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED;
1192 }
1193 *pTypeTag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
1194 }
1195
1196 if (pDescriptor != NULL) {
1197 std::string temp;
1198 *pDescriptor = c->GetDescriptor(&temp);
1199 }
1200 return JDWP::ERR_NONE;
1201 }
1202
FindLoadedClassBySignature(const char * descriptor,std::vector<JDWP::RefTypeId> & ids)1203 void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>& ids) {
1204 std::vector<mirror::Class*> classes;
1205 Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
1206 ids.clear();
1207 for (size_t i = 0; i < classes.size(); ++i) {
1208 ids.push_back(gRegistry->Add(classes[i]));
1209 }
1210 }
1211
GetReferenceType(JDWP::ObjectId object_id,JDWP::ExpandBuf * pReply)1212 JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply)
1213 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1214 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1215 if (o == NULL || o == ObjectRegistry::kInvalidObject) {
1216 return JDWP::ERR_INVALID_OBJECT;
1217 }
1218
1219 JDWP::JdwpTypeTag type_tag = GetTypeTag(o->GetClass());
1220 JDWP::RefTypeId type_id = gRegistry->AddRefType(o->GetClass());
1221
1222 expandBufAdd1(pReply, type_tag);
1223 expandBufAddRefTypeId(pReply, type_id);
1224
1225 return JDWP::ERR_NONE;
1226 }
1227
GetSignature(JDWP::RefTypeId class_id,std::string * signature)1228 JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signature) {
1229 JDWP::JdwpError status;
1230 mirror::Class* c = DecodeClass(class_id, status);
1231 if (c == NULL) {
1232 return status;
1233 }
1234 std::string temp;
1235 *signature = c->GetDescriptor(&temp);
1236 return JDWP::ERR_NONE;
1237 }
1238
GetSourceFile(JDWP::RefTypeId class_id,std::string & result)1239 JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string& result) {
1240 JDWP::JdwpError status;
1241 mirror::Class* c = DecodeClass(class_id, status);
1242 if (c == nullptr) {
1243 return status;
1244 }
1245 const char* source_file = c->GetSourceFile();
1246 if (source_file == nullptr) {
1247 return JDWP::ERR_ABSENT_INFORMATION;
1248 }
1249 result = source_file;
1250 return JDWP::ERR_NONE;
1251 }
1252
GetObjectTag(JDWP::ObjectId object_id,uint8_t & tag)1253 JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t& tag) {
1254 ScopedObjectAccessUnchecked soa(Thread::Current());
1255 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1256 if (o == ObjectRegistry::kInvalidObject) {
1257 return JDWP::ERR_INVALID_OBJECT;
1258 }
1259 tag = TagFromObject(soa, o);
1260 return JDWP::ERR_NONE;
1261 }
1262
GetTagWidth(JDWP::JdwpTag tag)1263 size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) {
1264 switch (tag) {
1265 case JDWP::JT_VOID:
1266 return 0;
1267 case JDWP::JT_BYTE:
1268 case JDWP::JT_BOOLEAN:
1269 return 1;
1270 case JDWP::JT_CHAR:
1271 case JDWP::JT_SHORT:
1272 return 2;
1273 case JDWP::JT_FLOAT:
1274 case JDWP::JT_INT:
1275 return 4;
1276 case JDWP::JT_ARRAY:
1277 case JDWP::JT_OBJECT:
1278 case JDWP::JT_STRING:
1279 case JDWP::JT_THREAD:
1280 case JDWP::JT_THREAD_GROUP:
1281 case JDWP::JT_CLASS_LOADER:
1282 case JDWP::JT_CLASS_OBJECT:
1283 return sizeof(JDWP::ObjectId);
1284 case JDWP::JT_DOUBLE:
1285 case JDWP::JT_LONG:
1286 return 8;
1287 default:
1288 LOG(FATAL) << "Unknown tag " << tag;
1289 return -1;
1290 }
1291 }
1292
GetArrayLength(JDWP::ObjectId array_id,int & length)1293 JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int& length) {
1294 JDWP::JdwpError status;
1295 mirror::Array* a = DecodeArray(array_id, status);
1296 if (a == NULL) {
1297 return status;
1298 }
1299 length = a->GetLength();
1300 return JDWP::ERR_NONE;
1301 }
1302
OutputArray(JDWP::ObjectId array_id,int offset,int count,JDWP::ExpandBuf * pReply)1303 JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count, JDWP::ExpandBuf* pReply) {
1304 JDWP::JdwpError status;
1305 mirror::Array* a = DecodeArray(array_id, status);
1306 if (a == nullptr) {
1307 return status;
1308 }
1309
1310 if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) {
1311 LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
1312 return JDWP::ERR_INVALID_LENGTH;
1313 }
1314 JDWP::JdwpTag element_tag = BasicTagFromClass(a->GetClass()->GetComponentType());
1315 expandBufAdd1(pReply, element_tag);
1316 expandBufAdd4BE(pReply, count);
1317
1318 if (IsPrimitiveTag(element_tag)) {
1319 size_t width = GetTagWidth(element_tag);
1320 uint8_t* dst = expandBufAddSpace(pReply, count * width);
1321 if (width == 8) {
1322 const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0));
1323 for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
1324 } else if (width == 4) {
1325 const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t), 0));
1326 for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
1327 } else if (width == 2) {
1328 const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t), 0));
1329 for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
1330 } else {
1331 const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t), 0));
1332 memcpy(dst, &src[offset * width], count * width);
1333 }
1334 } else {
1335 ScopedObjectAccessUnchecked soa(Thread::Current());
1336 mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>();
1337 for (int i = 0; i < count; ++i) {
1338 mirror::Object* element = oa->Get(offset + i);
1339 JDWP::JdwpTag specific_tag = (element != nullptr) ? TagFromObject(soa, element)
1340 : element_tag;
1341 expandBufAdd1(pReply, specific_tag);
1342 expandBufAddObjectId(pReply, gRegistry->Add(element));
1343 }
1344 }
1345
1346 return JDWP::ERR_NONE;
1347 }
1348
1349 template <typename T>
CopyArrayData(mirror::Array * a,JDWP::Request & src,int offset,int count)1350 static void CopyArrayData(mirror::Array* a, JDWP::Request& src, int offset, int count)
1351 NO_THREAD_SAFETY_ANALYSIS {
1352 // TODO: fix when annotalysis correctly handles non-member functions.
1353 DCHECK(a->GetClass()->IsPrimitiveArray());
1354
1355 T* dst = reinterpret_cast<T*>(a->GetRawData(sizeof(T), offset));
1356 for (int i = 0; i < count; ++i) {
1357 *dst++ = src.ReadValue(sizeof(T));
1358 }
1359 }
1360
SetArrayElements(JDWP::ObjectId array_id,int offset,int count,JDWP::Request & request)1361 JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
1362 JDWP::Request& request)
1363 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1364 JDWP::JdwpError status;
1365 mirror::Array* dst = DecodeArray(array_id, status);
1366 if (dst == NULL) {
1367 return status;
1368 }
1369
1370 if (offset < 0 || count < 0 || offset > dst->GetLength() || dst->GetLength() - offset < count) {
1371 LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
1372 return JDWP::ERR_INVALID_LENGTH;
1373 }
1374 JDWP::JdwpTag element_tag = BasicTagFromClass(dst->GetClass()->GetComponentType());
1375
1376 if (IsPrimitiveTag(element_tag)) {
1377 size_t width = GetTagWidth(element_tag);
1378 if (width == 8) {
1379 CopyArrayData<uint64_t>(dst, request, offset, count);
1380 } else if (width == 4) {
1381 CopyArrayData<uint32_t>(dst, request, offset, count);
1382 } else if (width == 2) {
1383 CopyArrayData<uint16_t>(dst, request, offset, count);
1384 } else {
1385 CopyArrayData<uint8_t>(dst, request, offset, count);
1386 }
1387 } else {
1388 mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>();
1389 for (int i = 0; i < count; ++i) {
1390 JDWP::ObjectId id = request.ReadObjectId();
1391 mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
1392 if (o == ObjectRegistry::kInvalidObject) {
1393 return JDWP::ERR_INVALID_OBJECT;
1394 }
1395 oa->Set<false>(offset + i, o);
1396 }
1397 }
1398
1399 return JDWP::ERR_NONE;
1400 }
1401
CreateString(const std::string & str)1402 JDWP::ObjectId Dbg::CreateString(const std::string& str) {
1403 return gRegistry->Add(mirror::String::AllocFromModifiedUtf8(Thread::Current(), str.c_str()));
1404 }
1405
CreateObject(JDWP::RefTypeId class_id,JDWP::ObjectId & new_object)1406 JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId& new_object) {
1407 JDWP::JdwpError status;
1408 mirror::Class* c = DecodeClass(class_id, status);
1409 if (c == NULL) {
1410 return status;
1411 }
1412 new_object = gRegistry->Add(c->AllocObject(Thread::Current()));
1413 return JDWP::ERR_NONE;
1414 }
1415
1416 /*
1417 * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]".
1418 */
CreateArrayObject(JDWP::RefTypeId array_class_id,uint32_t length,JDWP::ObjectId & new_array)1419 JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
1420 JDWP::ObjectId& new_array) {
1421 JDWP::JdwpError status;
1422 mirror::Class* c = DecodeClass(array_class_id, status);
1423 if (c == NULL) {
1424 return status;
1425 }
1426 new_array = gRegistry->Add(mirror::Array::Alloc<true>(Thread::Current(), c, length,
1427 c->GetComponentSize(),
1428 Runtime::Current()->GetHeap()->GetCurrentAllocator()));
1429 return JDWP::ERR_NONE;
1430 }
1431
ToFieldId(const mirror::ArtField * f)1432 JDWP::FieldId Dbg::ToFieldId(const mirror::ArtField* f) {
1433 CHECK(!kMovingFields);
1434 return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
1435 }
1436
ToMethodId(const mirror::ArtMethod * m)1437 static JDWP::MethodId ToMethodId(const mirror::ArtMethod* m)
1438 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1439 CHECK(!kMovingMethods);
1440 return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m));
1441 }
1442
FromFieldId(JDWP::FieldId fid)1443 static mirror::ArtField* FromFieldId(JDWP::FieldId fid)
1444 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1445 CHECK(!kMovingFields);
1446 return reinterpret_cast<mirror::ArtField*>(static_cast<uintptr_t>(fid));
1447 }
1448
FromMethodId(JDWP::MethodId mid)1449 static mirror::ArtMethod* FromMethodId(JDWP::MethodId mid)
1450 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1451 CHECK(!kMovingMethods);
1452 return reinterpret_cast<mirror::ArtMethod*>(static_cast<uintptr_t>(mid));
1453 }
1454
MatchThread(JDWP::ObjectId expected_thread_id,Thread * event_thread)1455 bool Dbg::MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread) {
1456 CHECK(event_thread != nullptr);
1457 mirror::Object* expected_thread_peer = gRegistry->Get<mirror::Object*>(expected_thread_id);
1458 return expected_thread_peer == event_thread->GetPeer();
1459 }
1460
MatchLocation(const JDWP::JdwpLocation & expected_location,const JDWP::EventLocation & event_location)1461 bool Dbg::MatchLocation(const JDWP::JdwpLocation& expected_location,
1462 const JDWP::EventLocation& event_location) {
1463 if (expected_location.dex_pc != event_location.dex_pc) {
1464 return false;
1465 }
1466 mirror::ArtMethod* m = FromMethodId(expected_location.method_id);
1467 return m == event_location.method;
1468 }
1469
MatchType(mirror::Class * event_class,JDWP::RefTypeId class_id)1470 bool Dbg::MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id) {
1471 if (event_class == nullptr) {
1472 return false;
1473 }
1474 JDWP::JdwpError status;
1475 mirror::Class* expected_class = DecodeClass(class_id, status);
1476 CHECK(expected_class != nullptr);
1477 return expected_class->IsAssignableFrom(event_class);
1478 }
1479
MatchField(JDWP::RefTypeId expected_type_id,JDWP::FieldId expected_field_id,mirror::ArtField * event_field)1480 bool Dbg::MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
1481 mirror::ArtField* event_field) {
1482 mirror::ArtField* expected_field = FromFieldId(expected_field_id);
1483 if (expected_field != event_field) {
1484 return false;
1485 }
1486 return Dbg::MatchType(event_field->GetDeclaringClass(), expected_type_id);
1487 }
1488
MatchInstance(JDWP::ObjectId expected_instance_id,mirror::Object * event_instance)1489 bool Dbg::MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance) {
1490 mirror::Object* modifier_instance = gRegistry->Get<mirror::Object*>(expected_instance_id);
1491 return modifier_instance == event_instance;
1492 }
1493
SetJdwpLocation(JDWP::JdwpLocation * location,mirror::ArtMethod * m,uint32_t dex_pc)1494 void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
1495 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1496 if (m == nullptr) {
1497 memset(location, 0, sizeof(*location));
1498 } else {
1499 mirror::Class* c = m->GetDeclaringClass();
1500 location->type_tag = GetTypeTag(c);
1501 location->class_id = gRegistry->AddRefType(c);
1502 location->method_id = ToMethodId(m);
1503 location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc;
1504 }
1505 }
1506
GetMethodName(JDWP::MethodId method_id)1507 std::string Dbg::GetMethodName(JDWP::MethodId method_id)
1508 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1509 mirror::ArtMethod* m = FromMethodId(method_id);
1510 if (m == nullptr) {
1511 return "NULL";
1512 }
1513 return m->GetName();
1514 }
1515
GetFieldName(JDWP::FieldId field_id)1516 std::string Dbg::GetFieldName(JDWP::FieldId field_id)
1517 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1518 mirror::ArtField* f = FromFieldId(field_id);
1519 if (f == nullptr) {
1520 return "NULL";
1521 }
1522 return f->GetName();
1523 }
1524
1525 /*
1526 * Augment the access flags for synthetic methods and fields by setting
1527 * the (as described by the spec) "0xf0000000 bit". Also, strip out any
1528 * flags not specified by the Java programming language.
1529 */
MangleAccessFlags(uint32_t accessFlags)1530 static uint32_t MangleAccessFlags(uint32_t accessFlags) {
1531 accessFlags &= kAccJavaFlagsMask;
1532 if ((accessFlags & kAccSynthetic) != 0) {
1533 accessFlags |= 0xf0000000;
1534 }
1535 return accessFlags;
1536 }
1537
1538 /*
1539 * Circularly shifts registers so that arguments come first. Debuggers
1540 * expect slots to begin with arguments, but dex code places them at
1541 * the end.
1542 */
MangleSlot(uint16_t slot,mirror::ArtMethod * m)1543 static uint16_t MangleSlot(uint16_t slot, mirror::ArtMethod* m)
1544 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1545 const DexFile::CodeItem* code_item = m->GetCodeItem();
1546 if (code_item == nullptr) {
1547 // We should not get here for a method without code (native, proxy or abstract). Log it and
1548 // return the slot as is since all registers are arguments.
1549 LOG(WARNING) << "Trying to mangle slot for method without code " << PrettyMethod(m);
1550 return slot;
1551 }
1552 uint16_t ins_size = code_item->ins_size_;
1553 uint16_t locals_size = code_item->registers_size_ - ins_size;
1554 if (slot >= locals_size) {
1555 return slot - locals_size;
1556 } else {
1557 return slot + ins_size;
1558 }
1559 }
1560
1561 /*
1562 * Circularly shifts registers so that arguments come last. Reverts
1563 * slots to dex style argument placement.
1564 */
DemangleSlot(uint16_t slot,mirror::ArtMethod * m)1565 static uint16_t DemangleSlot(uint16_t slot, mirror::ArtMethod* m)
1566 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1567 const DexFile::CodeItem* code_item = m->GetCodeItem();
1568 if (code_item == nullptr) {
1569 // We should not get here for a method without code (native, proxy or abstract). Log it and
1570 // return the slot as is since all registers are arguments.
1571 LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m);
1572 return slot;
1573 }
1574 uint16_t ins_size = code_item->ins_size_;
1575 uint16_t locals_size = code_item->registers_size_ - ins_size;
1576 if (slot < ins_size) {
1577 return slot + locals_size;
1578 } else {
1579 return slot - ins_size;
1580 }
1581 }
1582
OutputDeclaredFields(JDWP::RefTypeId class_id,bool with_generic,JDWP::ExpandBuf * pReply)1583 JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) {
1584 JDWP::JdwpError status;
1585 mirror::Class* c = DecodeClass(class_id, status);
1586 if (c == NULL) {
1587 return status;
1588 }
1589
1590 size_t instance_field_count = c->NumInstanceFields();
1591 size_t static_field_count = c->NumStaticFields();
1592
1593 expandBufAdd4BE(pReply, instance_field_count + static_field_count);
1594
1595 for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
1596 mirror::ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count);
1597 expandBufAddFieldId(pReply, ToFieldId(f));
1598 expandBufAddUtf8String(pReply, f->GetName());
1599 expandBufAddUtf8String(pReply, f->GetTypeDescriptor());
1600 if (with_generic) {
1601 static const char genericSignature[1] = "";
1602 expandBufAddUtf8String(pReply, genericSignature);
1603 }
1604 expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags()));
1605 }
1606 return JDWP::ERR_NONE;
1607 }
1608
OutputDeclaredMethods(JDWP::RefTypeId class_id,bool with_generic,JDWP::ExpandBuf * pReply)1609 JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic,
1610 JDWP::ExpandBuf* pReply) {
1611 JDWP::JdwpError status;
1612 mirror::Class* c = DecodeClass(class_id, status);
1613 if (c == NULL) {
1614 return status;
1615 }
1616
1617 size_t direct_method_count = c->NumDirectMethods();
1618 size_t virtual_method_count = c->NumVirtualMethods();
1619
1620 expandBufAdd4BE(pReply, direct_method_count + virtual_method_count);
1621
1622 for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) {
1623 mirror::ArtMethod* m = (i < direct_method_count) ? c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count);
1624 expandBufAddMethodId(pReply, ToMethodId(m));
1625 expandBufAddUtf8String(pReply, m->GetName());
1626 expandBufAddUtf8String(pReply, m->GetSignature().ToString());
1627 if (with_generic) {
1628 static const char genericSignature[1] = "";
1629 expandBufAddUtf8String(pReply, genericSignature);
1630 }
1631 expandBufAdd4BE(pReply, MangleAccessFlags(m->GetAccessFlags()));
1632 }
1633 return JDWP::ERR_NONE;
1634 }
1635
OutputDeclaredInterfaces(JDWP::RefTypeId class_id,JDWP::ExpandBuf * pReply)1636 JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
1637 JDWP::JdwpError status;
1638 Thread* self = Thread::Current();
1639 StackHandleScope<1> hs(self);
1640 Handle<mirror::Class> c(hs.NewHandle(DecodeClass(class_id, status)));
1641 if (c.Get() == nullptr) {
1642 return status;
1643 }
1644 size_t interface_count = c->NumDirectInterfaces();
1645 expandBufAdd4BE(pReply, interface_count);
1646 for (size_t i = 0; i < interface_count; ++i) {
1647 expandBufAddRefTypeId(pReply,
1648 gRegistry->AddRefType(mirror::Class::GetDirectInterface(self, c, i)));
1649 }
1650 return JDWP::ERR_NONE;
1651 }
1652
OutputLineTable(JDWP::RefTypeId,JDWP::MethodId method_id,JDWP::ExpandBuf * pReply)1653 void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply)
1654 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1655 struct DebugCallbackContext {
1656 int numItems;
1657 JDWP::ExpandBuf* pReply;
1658
1659 static bool Callback(void* context, uint32_t address, uint32_t line_number) {
1660 DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1661 expandBufAdd8BE(pContext->pReply, address);
1662 expandBufAdd4BE(pContext->pReply, line_number);
1663 pContext->numItems++;
1664 return false;
1665 }
1666 };
1667 mirror::ArtMethod* m = FromMethodId(method_id);
1668 const DexFile::CodeItem* code_item = m->GetCodeItem();
1669 uint64_t start, end;
1670 if (code_item == nullptr) {
1671 DCHECK(m->IsNative() || m->IsProxyMethod());
1672 start = -1;
1673 end = -1;
1674 } else {
1675 start = 0;
1676 // Return the index of the last instruction
1677 end = code_item->insns_size_in_code_units_ - 1;
1678 }
1679
1680 expandBufAdd8BE(pReply, start);
1681 expandBufAdd8BE(pReply, end);
1682
1683 // Add numLines later
1684 size_t numLinesOffset = expandBufGetLength(pReply);
1685 expandBufAdd4BE(pReply, 0);
1686
1687 DebugCallbackContext context;
1688 context.numItems = 0;
1689 context.pReply = pReply;
1690
1691 if (code_item != nullptr) {
1692 m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
1693 DebugCallbackContext::Callback, NULL, &context);
1694 }
1695
1696 JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
1697 }
1698
OutputVariableTable(JDWP::RefTypeId,JDWP::MethodId method_id,bool with_generic,JDWP::ExpandBuf * pReply)1699 void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic,
1700 JDWP::ExpandBuf* pReply) {
1701 struct DebugCallbackContext {
1702 mirror::ArtMethod* method;
1703 JDWP::ExpandBuf* pReply;
1704 size_t variable_count;
1705 bool with_generic;
1706
1707 static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress,
1708 const char* name, const char* descriptor, const char* signature)
1709 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1710 DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1711
1712 VLOG(jdwp) << StringPrintf(" %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
1713 pContext->variable_count, startAddress, endAddress - startAddress,
1714 name, descriptor, signature, slot,
1715 MangleSlot(slot, pContext->method));
1716
1717 slot = MangleSlot(slot, pContext->method);
1718
1719 expandBufAdd8BE(pContext->pReply, startAddress);
1720 expandBufAddUtf8String(pContext->pReply, name);
1721 expandBufAddUtf8String(pContext->pReply, descriptor);
1722 if (pContext->with_generic) {
1723 expandBufAddUtf8String(pContext->pReply, signature);
1724 }
1725 expandBufAdd4BE(pContext->pReply, endAddress - startAddress);
1726 expandBufAdd4BE(pContext->pReply, slot);
1727
1728 ++pContext->variable_count;
1729 }
1730 };
1731 mirror::ArtMethod* m = FromMethodId(method_id);
1732
1733 // arg_count considers doubles and longs to take 2 units.
1734 // variable_count considers everything to take 1 unit.
1735 std::string shorty(m->GetShorty());
1736 expandBufAdd4BE(pReply, mirror::ArtMethod::NumArgRegisters(shorty));
1737
1738 // We don't know the total number of variables yet, so leave a blank and update it later.
1739 size_t variable_count_offset = expandBufGetLength(pReply);
1740 expandBufAdd4BE(pReply, 0);
1741
1742 DebugCallbackContext context;
1743 context.method = m;
1744 context.pReply = pReply;
1745 context.variable_count = 0;
1746 context.with_generic = with_generic;
1747
1748 const DexFile::CodeItem* code_item = m->GetCodeItem();
1749 if (code_item != nullptr) {
1750 m->GetDexFile()->DecodeDebugInfo(
1751 code_item, m->IsStatic(), m->GetDexMethodIndex(), NULL, DebugCallbackContext::Callback,
1752 &context);
1753 }
1754
1755 JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count);
1756 }
1757
OutputMethodReturnValue(JDWP::MethodId method_id,const JValue * return_value,JDWP::ExpandBuf * pReply)1758 void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
1759 JDWP::ExpandBuf* pReply) {
1760 mirror::ArtMethod* m = FromMethodId(method_id);
1761 JDWP::JdwpTag tag = BasicTagFromDescriptor(m->GetShorty());
1762 OutputJValue(tag, return_value, pReply);
1763 }
1764
OutputFieldValue(JDWP::FieldId field_id,const JValue * field_value,JDWP::ExpandBuf * pReply)1765 void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
1766 JDWP::ExpandBuf* pReply) {
1767 mirror::ArtField* f = FromFieldId(field_id);
1768 JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1769 OutputJValue(tag, field_value, pReply);
1770 }
1771
GetBytecodes(JDWP::RefTypeId,JDWP::MethodId method_id,std::vector<uint8_t> & bytecodes)1772 JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id,
1773 std::vector<uint8_t>& bytecodes)
1774 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1775 mirror::ArtMethod* m = FromMethodId(method_id);
1776 if (m == NULL) {
1777 return JDWP::ERR_INVALID_METHODID;
1778 }
1779 const DexFile::CodeItem* code_item = m->GetCodeItem();
1780 size_t byte_count = code_item->insns_size_in_code_units_ * 2;
1781 const uint8_t* begin = reinterpret_cast<const uint8_t*>(code_item->insns_);
1782 const uint8_t* end = begin + byte_count;
1783 for (const uint8_t* p = begin; p != end; ++p) {
1784 bytecodes.push_back(*p);
1785 }
1786 return JDWP::ERR_NONE;
1787 }
1788
GetFieldBasicTag(JDWP::FieldId field_id)1789 JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId field_id) {
1790 return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
1791 }
1792
GetStaticFieldBasicTag(JDWP::FieldId field_id)1793 JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) {
1794 return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
1795 }
1796
GetFieldValueImpl(JDWP::RefTypeId ref_type_id,JDWP::ObjectId object_id,JDWP::FieldId field_id,JDWP::ExpandBuf * pReply,bool is_static)1797 static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
1798 JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
1799 bool is_static)
1800 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1801 JDWP::JdwpError status;
1802 mirror::Class* c = DecodeClass(ref_type_id, status);
1803 if (ref_type_id != 0 && c == NULL) {
1804 return status;
1805 }
1806
1807 mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id);
1808 if ((!is_static && o == NULL) || o == ObjectRegistry::kInvalidObject) {
1809 return JDWP::ERR_INVALID_OBJECT;
1810 }
1811 mirror::ArtField* f = FromFieldId(field_id);
1812
1813 mirror::Class* receiver_class = c;
1814 if (receiver_class == NULL && o != NULL) {
1815 receiver_class = o->GetClass();
1816 }
1817 // TODO: should we give up now if receiver_class is NULL?
1818 if (receiver_class != NULL && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
1819 LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class);
1820 return JDWP::ERR_INVALID_FIELDID;
1821 }
1822
1823 // The RI only enforces the static/non-static mismatch in one direction.
1824 // TODO: should we change the tests and check both?
1825 if (is_static) {
1826 if (!f->IsStatic()) {
1827 return JDWP::ERR_INVALID_FIELDID;
1828 }
1829 } else {
1830 if (f->IsStatic()) {
1831 LOG(WARNING) << "Ignoring non-NULL receiver for ObjectReference.SetValues on static field " << PrettyField(f);
1832 }
1833 }
1834 if (f->IsStatic()) {
1835 o = f->GetDeclaringClass();
1836 }
1837
1838 JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1839 JValue field_value;
1840 if (tag == JDWP::JT_VOID) {
1841 LOG(FATAL) << "Unknown tag: " << tag;
1842 } else if (!IsPrimitiveTag(tag)) {
1843 field_value.SetL(f->GetObject(o));
1844 } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1845 field_value.SetJ(f->Get64(o));
1846 } else {
1847 field_value.SetI(f->Get32(o));
1848 }
1849 Dbg::OutputJValue(tag, &field_value, pReply);
1850
1851 return JDWP::ERR_NONE;
1852 }
1853
GetFieldValue(JDWP::ObjectId object_id,JDWP::FieldId field_id,JDWP::ExpandBuf * pReply)1854 JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1855 JDWP::ExpandBuf* pReply) {
1856 return GetFieldValueImpl(0, object_id, field_id, pReply, false);
1857 }
1858
GetStaticFieldValue(JDWP::RefTypeId ref_type_id,JDWP::FieldId field_id,JDWP::ExpandBuf * pReply)1859 JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id, JDWP::ExpandBuf* pReply) {
1860 return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
1861 }
1862
SetFieldValueImpl(JDWP::ObjectId object_id,JDWP::FieldId field_id,uint64_t value,int width,bool is_static)1863 static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1864 uint64_t value, int width, bool is_static)
1865 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1866 mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id);
1867 if ((!is_static && o == NULL) || o == ObjectRegistry::kInvalidObject) {
1868 return JDWP::ERR_INVALID_OBJECT;
1869 }
1870 mirror::ArtField* f = FromFieldId(field_id);
1871
1872 // The RI only enforces the static/non-static mismatch in one direction.
1873 // TODO: should we change the tests and check both?
1874 if (is_static) {
1875 if (!f->IsStatic()) {
1876 return JDWP::ERR_INVALID_FIELDID;
1877 }
1878 } else {
1879 if (f->IsStatic()) {
1880 LOG(WARNING) << "Ignoring non-NULL receiver for ObjectReference.SetValues on static field " << PrettyField(f);
1881 }
1882 }
1883 if (f->IsStatic()) {
1884 o = f->GetDeclaringClass();
1885 }
1886
1887 JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1888
1889 if (IsPrimitiveTag(tag)) {
1890 if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1891 CHECK_EQ(width, 8);
1892 // Debugging can't use transactional mode (runtime only).
1893 f->Set64<false>(o, value);
1894 } else {
1895 CHECK_LE(width, 4);
1896 // Debugging can't use transactional mode (runtime only).
1897 f->Set32<false>(o, value);
1898 }
1899 } else {
1900 mirror::Object* v = Dbg::GetObjectRegistry()->Get<mirror::Object*>(value);
1901 if (v == ObjectRegistry::kInvalidObject) {
1902 return JDWP::ERR_INVALID_OBJECT;
1903 }
1904 if (v != NULL) {
1905 mirror::Class* field_type;
1906 {
1907 StackHandleScope<3> hs(Thread::Current());
1908 HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
1909 HandleWrapper<mirror::ArtField> h_f(hs.NewHandleWrapper(&f));
1910 HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
1911 field_type = FieldHelper(h_f).GetType();
1912 }
1913 if (!field_type->IsAssignableFrom(v->GetClass())) {
1914 return JDWP::ERR_INVALID_OBJECT;
1915 }
1916 }
1917 // Debugging can't use transactional mode (runtime only).
1918 f->SetObject<false>(o, v);
1919 }
1920
1921 return JDWP::ERR_NONE;
1922 }
1923
SetFieldValue(JDWP::ObjectId object_id,JDWP::FieldId field_id,uint64_t value,int width)1924 JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value,
1925 int width) {
1926 return SetFieldValueImpl(object_id, field_id, value, width, false);
1927 }
1928
SetStaticFieldValue(JDWP::FieldId field_id,uint64_t value,int width)1929 JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) {
1930 return SetFieldValueImpl(0, field_id, value, width, true);
1931 }
1932
StringToUtf8(JDWP::ObjectId string_id,std::string * str)1933 JDWP::JdwpError Dbg::StringToUtf8(JDWP::ObjectId string_id, std::string* str) {
1934 mirror::Object* obj = gRegistry->Get<mirror::Object*>(string_id);
1935 if (obj == nullptr || obj == ObjectRegistry::kInvalidObject) {
1936 return JDWP::ERR_INVALID_OBJECT;
1937 }
1938 {
1939 ScopedObjectAccessUnchecked soa(Thread::Current());
1940 mirror::Class* java_lang_String = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_String);
1941 if (!java_lang_String->IsAssignableFrom(obj->GetClass())) {
1942 // This isn't a string.
1943 return JDWP::ERR_INVALID_STRING;
1944 }
1945 }
1946 *str = obj->AsString()->ToModifiedUtf8();
1947 return JDWP::ERR_NONE;
1948 }
1949
OutputJValue(JDWP::JdwpTag tag,const JValue * return_value,JDWP::ExpandBuf * pReply)1950 void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) {
1951 if (IsPrimitiveTag(tag)) {
1952 expandBufAdd1(pReply, tag);
1953 if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) {
1954 expandBufAdd1(pReply, return_value->GetI());
1955 } else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) {
1956 expandBufAdd2BE(pReply, return_value->GetI());
1957 } else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) {
1958 expandBufAdd4BE(pReply, return_value->GetI());
1959 } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1960 expandBufAdd8BE(pReply, return_value->GetJ());
1961 } else {
1962 CHECK_EQ(tag, JDWP::JT_VOID);
1963 }
1964 } else {
1965 ScopedObjectAccessUnchecked soa(Thread::Current());
1966 mirror::Object* value = return_value->GetL();
1967 expandBufAdd1(pReply, TagFromObject(soa, value));
1968 expandBufAddObjectId(pReply, gRegistry->Add(value));
1969 }
1970 }
1971
GetThreadName(JDWP::ObjectId thread_id,std::string & name)1972 JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string& name) {
1973 ScopedObjectAccessUnchecked soa(Thread::Current());
1974 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1975 Thread* thread;
1976 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
1977 if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
1978 return error;
1979 }
1980
1981 // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName.
1982 mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id);
1983 mirror::ArtField* java_lang_Thread_name_field =
1984 soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
1985 mirror::String* s =
1986 reinterpret_cast<mirror::String*>(java_lang_Thread_name_field->GetObject(thread_object));
1987 if (s != NULL) {
1988 name = s->ToModifiedUtf8();
1989 }
1990 return JDWP::ERR_NONE;
1991 }
1992
GetThreadGroup(JDWP::ObjectId thread_id,JDWP::ExpandBuf * pReply)1993 JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
1994 ScopedObjectAccessUnchecked soa(Thread::Current());
1995 mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id);
1996 if (thread_object == ObjectRegistry::kInvalidObject) {
1997 return JDWP::ERR_INVALID_OBJECT;
1998 }
1999 const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroup");
2000 // Okay, so it's an object, but is it actually a thread?
2001 JDWP::JdwpError error;
2002 {
2003 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2004 Thread* thread;
2005 error = DecodeThread(soa, thread_id, thread);
2006 }
2007 if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
2008 // Zombie threads are in the null group.
2009 expandBufAddObjectId(pReply, JDWP::ObjectId(0));
2010 error = JDWP::ERR_NONE;
2011 } else if (error == JDWP::ERR_NONE) {
2012 mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
2013 CHECK(c != nullptr);
2014 mirror::ArtField* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;");
2015 CHECK(f != nullptr);
2016 mirror::Object* group = f->GetObject(thread_object);
2017 CHECK(group != nullptr);
2018 JDWP::ObjectId thread_group_id = gRegistry->Add(group);
2019 expandBufAddObjectId(pReply, thread_group_id);
2020 }
2021 soa.Self()->EndAssertNoThreadSuspension(old_cause);
2022 return error;
2023 }
2024
DecodeThreadGroup(ScopedObjectAccessUnchecked & soa,JDWP::ObjectId thread_group_id,JDWP::JdwpError * error)2025 static mirror::Object* DecodeThreadGroup(ScopedObjectAccessUnchecked& soa,
2026 JDWP::ObjectId thread_group_id, JDWP::JdwpError* error)
2027 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2028 mirror::Object* thread_group = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_group_id);
2029 if (thread_group == nullptr || thread_group == ObjectRegistry::kInvalidObject) {
2030 *error = JDWP::ERR_INVALID_OBJECT;
2031 return nullptr;
2032 }
2033 mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
2034 CHECK(c != nullptr);
2035 if (!c->IsAssignableFrom(thread_group->GetClass())) {
2036 // This is not a java.lang.ThreadGroup.
2037 *error = JDWP::ERR_INVALID_THREAD_GROUP;
2038 return nullptr;
2039 }
2040 *error = JDWP::ERR_NONE;
2041 return thread_group;
2042 }
2043
GetThreadGroupName(JDWP::ObjectId thread_group_id,JDWP::ExpandBuf * pReply)2044 JDWP::JdwpError Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
2045 ScopedObjectAccessUnchecked soa(Thread::Current());
2046 JDWP::JdwpError error;
2047 mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
2048 if (error != JDWP::ERR_NONE) {
2049 return error;
2050 }
2051 const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupName");
2052 mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
2053 mirror::ArtField* f = c->FindInstanceField("name", "Ljava/lang/String;");
2054 CHECK(f != NULL);
2055 mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
2056 soa.Self()->EndAssertNoThreadSuspension(old_cause);
2057
2058 std::string thread_group_name(s->ToModifiedUtf8());
2059 expandBufAddUtf8String(pReply, thread_group_name);
2060 return JDWP::ERR_NONE;
2061 }
2062
GetThreadGroupParent(JDWP::ObjectId thread_group_id,JDWP::ExpandBuf * pReply)2063 JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
2064 ScopedObjectAccessUnchecked soa(Thread::Current());
2065 JDWP::JdwpError error;
2066 mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
2067 if (error != JDWP::ERR_NONE) {
2068 return error;
2069 }
2070 const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupParent");
2071 mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
2072 CHECK(c != nullptr);
2073 mirror::ArtField* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;");
2074 CHECK(f != NULL);
2075 mirror::Object* parent = f->GetObject(thread_group);
2076 soa.Self()->EndAssertNoThreadSuspension(old_cause);
2077
2078 JDWP::ObjectId parent_group_id = gRegistry->Add(parent);
2079 expandBufAddObjectId(pReply, parent_group_id);
2080 return JDWP::ERR_NONE;
2081 }
2082
GetChildThreadGroups(ScopedObjectAccessUnchecked & soa,mirror::Object * thread_group,std::vector<JDWP::ObjectId> * child_thread_group_ids)2083 static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Object* thread_group,
2084 std::vector<JDWP::ObjectId>* child_thread_group_ids)
2085 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2086 CHECK(thread_group != nullptr);
2087
2088 // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
2089 mirror::ArtField* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;");
2090 mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
2091
2092 // Get the array and size out of the ArrayList<ThreadGroup>...
2093 mirror::ArtField* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
2094 mirror::ArtField* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I");
2095 mirror::ObjectArray<mirror::Object>* groups_array =
2096 array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
2097 const int32_t size = size_field->GetInt(groups_array_list);
2098
2099 // Copy the first 'size' elements out of the array into the result.
2100 ObjectRegistry* registry = Dbg::GetObjectRegistry();
2101 for (int32_t i = 0; i < size; ++i) {
2102 child_thread_group_ids->push_back(registry->Add(groups_array->Get(i)));
2103 }
2104 }
2105
GetThreadGroupChildren(JDWP::ObjectId thread_group_id,JDWP::ExpandBuf * pReply)2106 JDWP::JdwpError Dbg::GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
2107 JDWP::ExpandBuf* pReply) {
2108 ScopedObjectAccessUnchecked soa(Thread::Current());
2109 JDWP::JdwpError error;
2110 mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
2111 if (error != JDWP::ERR_NONE) {
2112 return error;
2113 }
2114
2115 // Add child threads.
2116 {
2117 std::vector<JDWP::ObjectId> child_thread_ids;
2118 GetThreads(thread_group, &child_thread_ids);
2119 expandBufAdd4BE(pReply, child_thread_ids.size());
2120 for (JDWP::ObjectId child_thread_id : child_thread_ids) {
2121 expandBufAddObjectId(pReply, child_thread_id);
2122 }
2123 }
2124
2125 // Add child thread groups.
2126 {
2127 std::vector<JDWP::ObjectId> child_thread_groups_ids;
2128 GetChildThreadGroups(soa, thread_group, &child_thread_groups_ids);
2129 expandBufAdd4BE(pReply, child_thread_groups_ids.size());
2130 for (JDWP::ObjectId child_thread_group_id : child_thread_groups_ids) {
2131 expandBufAddObjectId(pReply, child_thread_group_id);
2132 }
2133 }
2134
2135 return JDWP::ERR_NONE;
2136 }
2137
GetSystemThreadGroupId()2138 JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
2139 ScopedObjectAccessUnchecked soa(Thread::Current());
2140 mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
2141 mirror::Object* group = f->GetObject(f->GetDeclaringClass());
2142 return gRegistry->Add(group);
2143 }
2144
ToJdwpThreadStatus(ThreadState state)2145 JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
2146 switch (state) {
2147 case kBlocked:
2148 return JDWP::TS_MONITOR;
2149 case kNative:
2150 case kRunnable:
2151 case kSuspended:
2152 return JDWP::TS_RUNNING;
2153 case kSleeping:
2154 return JDWP::TS_SLEEPING;
2155 case kStarting:
2156 case kTerminated:
2157 return JDWP::TS_ZOMBIE;
2158 case kTimedWaiting:
2159 case kWaitingForCheckPointsToRun:
2160 case kWaitingForDebuggerSend:
2161 case kWaitingForDebuggerSuspension:
2162 case kWaitingForDebuggerToAttach:
2163 case kWaitingForDeoptimization:
2164 case kWaitingForGcToComplete:
2165 case kWaitingForJniOnLoad:
2166 case kWaitingForMethodTracingStart:
2167 case kWaitingForSignalCatcherOutput:
2168 case kWaitingInMainDebuggerLoop:
2169 case kWaitingInMainSignalCatcherLoop:
2170 case kWaitingPerformingGc:
2171 case kWaiting:
2172 return JDWP::TS_WAIT;
2173 // Don't add a 'default' here so the compiler can spot incompatible enum changes.
2174 }
2175 LOG(FATAL) << "Unknown thread state: " << state;
2176 return JDWP::TS_ZOMBIE;
2177 }
2178
GetThreadStatus(JDWP::ObjectId thread_id,JDWP::JdwpThreadStatus * pThreadStatus,JDWP::JdwpSuspendStatus * pSuspendStatus)2179 JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus,
2180 JDWP::JdwpSuspendStatus* pSuspendStatus) {
2181 ScopedObjectAccess soa(Thread::Current());
2182
2183 *pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED;
2184
2185 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2186 Thread* thread;
2187 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2188 if (error != JDWP::ERR_NONE) {
2189 if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
2190 *pThreadStatus = JDWP::TS_ZOMBIE;
2191 return JDWP::ERR_NONE;
2192 }
2193 return error;
2194 }
2195
2196 if (IsSuspendedForDebugger(soa, thread)) {
2197 *pSuspendStatus = JDWP::SUSPEND_STATUS_SUSPENDED;
2198 }
2199
2200 *pThreadStatus = ToJdwpThreadStatus(thread->GetState());
2201 return JDWP::ERR_NONE;
2202 }
2203
GetThreadDebugSuspendCount(JDWP::ObjectId thread_id,JDWP::ExpandBuf * pReply)2204 JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
2205 ScopedObjectAccess soa(Thread::Current());
2206 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2207 Thread* thread;
2208 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2209 if (error != JDWP::ERR_NONE) {
2210 return error;
2211 }
2212 MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
2213 expandBufAdd4BE(pReply, thread->GetDebugSuspendCount());
2214 return JDWP::ERR_NONE;
2215 }
2216
Interrupt(JDWP::ObjectId thread_id)2217 JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) {
2218 ScopedObjectAccess soa(Thread::Current());
2219 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2220 Thread* thread;
2221 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2222 if (error != JDWP::ERR_NONE) {
2223 return error;
2224 }
2225 thread->Interrupt(soa.Self());
2226 return JDWP::ERR_NONE;
2227 }
2228
IsInDesiredThreadGroup(ScopedObjectAccessUnchecked & soa,mirror::Object * desired_thread_group,mirror::Object * peer)2229 static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
2230 mirror::Object* desired_thread_group, mirror::Object* peer)
2231 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2232 // Do we want threads from all thread groups?
2233 if (desired_thread_group == nullptr) {
2234 return true;
2235 }
2236 mirror::ArtField* thread_group_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
2237 DCHECK(thread_group_field != nullptr);
2238 mirror::Object* group = thread_group_field->GetObject(peer);
2239 return (group == desired_thread_group);
2240 }
2241
GetThreads(mirror::Object * thread_group,std::vector<JDWP::ObjectId> * thread_ids)2242 void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids) {
2243 ScopedObjectAccessUnchecked soa(Thread::Current());
2244 std::list<Thread*> all_threads_list;
2245 {
2246 MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
2247 all_threads_list = Runtime::Current()->GetThreadList()->GetList();
2248 }
2249 for (Thread* t : all_threads_list) {
2250 if (t == Dbg::GetDebugThread()) {
2251 // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and
2252 // query all threads, so it's easier if we just don't tell them about this thread.
2253 continue;
2254 }
2255 if (t->IsStillStarting()) {
2256 // This thread is being started (and has been registered in the thread list). However, it is
2257 // not completely started yet so we must ignore it.
2258 continue;
2259 }
2260 mirror::Object* peer = t->GetPeer();
2261 if (peer == nullptr) {
2262 // peer might be NULL if the thread is still starting up. We can't tell the debugger about
2263 // this thread yet.
2264 // TODO: if we identified threads to the debugger by their Thread*
2265 // rather than their peer's mirror::Object*, we could fix this.
2266 // Doing so might help us report ZOMBIE threads too.
2267 continue;
2268 }
2269 if (IsInDesiredThreadGroup(soa, thread_group, peer)) {
2270 thread_ids->push_back(gRegistry->Add(peer));
2271 }
2272 }
2273 }
2274
GetStackDepth(Thread * thread)2275 static int GetStackDepth(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2276 struct CountStackDepthVisitor : public StackVisitor {
2277 explicit CountStackDepthVisitor(Thread* thread)
2278 : StackVisitor(thread, NULL), depth(0) {}
2279
2280 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2281 // annotalysis.
2282 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2283 if (!GetMethod()->IsRuntimeMethod()) {
2284 ++depth;
2285 }
2286 return true;
2287 }
2288 size_t depth;
2289 };
2290
2291 CountStackDepthVisitor visitor(thread);
2292 visitor.WalkStack();
2293 return visitor.depth;
2294 }
2295
GetThreadFrameCount(JDWP::ObjectId thread_id,size_t & result)2296 JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t& result) {
2297 ScopedObjectAccess soa(Thread::Current());
2298 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2299 Thread* thread;
2300 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2301 if (error != JDWP::ERR_NONE) {
2302 return error;
2303 }
2304 if (!IsSuspendedForDebugger(soa, thread)) {
2305 return JDWP::ERR_THREAD_NOT_SUSPENDED;
2306 }
2307 result = GetStackDepth(thread);
2308 return JDWP::ERR_NONE;
2309 }
2310
GetThreadFrames(JDWP::ObjectId thread_id,size_t start_frame,size_t frame_count,JDWP::ExpandBuf * buf)2311 JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
2312 size_t frame_count, JDWP::ExpandBuf* buf) {
2313 class GetFrameVisitor : public StackVisitor {
2314 public:
2315 GetFrameVisitor(Thread* thread, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf)
2316 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2317 : StackVisitor(thread, NULL), depth_(0),
2318 start_frame_(start_frame), frame_count_(frame_count), buf_(buf) {
2319 expandBufAdd4BE(buf_, frame_count_);
2320 }
2321
2322 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2323 // annotalysis.
2324 virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2325 if (GetMethod()->IsRuntimeMethod()) {
2326 return true; // The debugger can't do anything useful with a frame that has no Method*.
2327 }
2328 if (depth_ >= start_frame_ + frame_count_) {
2329 return false;
2330 }
2331 if (depth_ >= start_frame_) {
2332 JDWP::FrameId frame_id(GetFrameId());
2333 JDWP::JdwpLocation location;
2334 SetJdwpLocation(&location, GetMethod(), GetDexPc());
2335 VLOG(jdwp) << StringPrintf(" Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location;
2336 expandBufAdd8BE(buf_, frame_id);
2337 expandBufAddLocation(buf_, location);
2338 }
2339 ++depth_;
2340 return true;
2341 }
2342
2343 private:
2344 size_t depth_;
2345 const size_t start_frame_;
2346 const size_t frame_count_;
2347 JDWP::ExpandBuf* buf_;
2348 };
2349
2350 ScopedObjectAccessUnchecked soa(Thread::Current());
2351 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2352 Thread* thread;
2353 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2354 if (error != JDWP::ERR_NONE) {
2355 return error;
2356 }
2357 if (!IsSuspendedForDebugger(soa, thread)) {
2358 return JDWP::ERR_THREAD_NOT_SUSPENDED;
2359 }
2360 GetFrameVisitor visitor(thread, start_frame, frame_count, buf);
2361 visitor.WalkStack();
2362 return JDWP::ERR_NONE;
2363 }
2364
GetThreadSelfId()2365 JDWP::ObjectId Dbg::GetThreadSelfId() {
2366 return GetThreadId(Thread::Current());
2367 }
2368
GetThreadId(Thread * thread)2369 JDWP::ObjectId Dbg::GetThreadId(Thread* thread) {
2370 ScopedObjectAccessUnchecked soa(Thread::Current());
2371 return gRegistry->Add(thread->GetPeer());
2372 }
2373
SuspendVM()2374 void Dbg::SuspendVM() {
2375 Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
2376 }
2377
ResumeVM()2378 void Dbg::ResumeVM() {
2379 Runtime::Current()->GetThreadList()->ResumeAllForDebugger();
2380 }
2381
SuspendThread(JDWP::ObjectId thread_id,bool request_suspension)2382 JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
2383 Thread* self = Thread::Current();
2384 ScopedLocalRef<jobject> peer(self->GetJniEnv(), NULL);
2385 {
2386 ScopedObjectAccess soa(self);
2387 peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id)));
2388 }
2389 if (peer.get() == NULL) {
2390 return JDWP::ERR_THREAD_NOT_ALIVE;
2391 }
2392 // Suspend thread to build stack trace. Take suspend thread lock to avoid races with threads
2393 // trying to suspend this one.
2394 MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
2395 bool timed_out;
2396 ThreadList* thread_list = Runtime::Current()->GetThreadList();
2397 Thread* thread = thread_list->SuspendThreadByPeer(peer.get(), request_suspension, true,
2398 &timed_out);
2399 if (thread != NULL) {
2400 return JDWP::ERR_NONE;
2401 } else if (timed_out) {
2402 return JDWP::ERR_INTERNAL;
2403 } else {
2404 return JDWP::ERR_THREAD_NOT_ALIVE;
2405 }
2406 }
2407
ResumeThread(JDWP::ObjectId thread_id)2408 void Dbg::ResumeThread(JDWP::ObjectId thread_id) {
2409 ScopedObjectAccessUnchecked soa(Thread::Current());
2410 mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id);
2411 Thread* thread;
2412 {
2413 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2414 thread = Thread::FromManagedThread(soa, peer);
2415 }
2416 if (thread == NULL) {
2417 LOG(WARNING) << "No such thread for resume: " << peer;
2418 return;
2419 }
2420 bool needs_resume;
2421 {
2422 MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
2423 needs_resume = thread->GetSuspendCount() > 0;
2424 }
2425 if (needs_resume) {
2426 Runtime::Current()->GetThreadList()->Resume(thread, true);
2427 }
2428 }
2429
SuspendSelf()2430 void Dbg::SuspendSelf() {
2431 Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
2432 }
2433
2434 struct GetThisVisitor : public StackVisitor {
GetThisVisitorart::GetThisVisitor2435 GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
2436 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2437 : StackVisitor(thread, context), this_object(NULL), frame_id(frame_id) {}
2438
2439 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2440 // annotalysis.
VisitFrameart::GetThisVisitor2441 virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2442 if (frame_id != GetFrameId()) {
2443 return true; // continue
2444 } else {
2445 this_object = GetThisObject();
2446 return false;
2447 }
2448 }
2449
2450 mirror::Object* this_object;
2451 JDWP::FrameId frame_id;
2452 };
2453
GetThisObject(JDWP::ObjectId thread_id,JDWP::FrameId frame_id,JDWP::ObjectId * result)2454 JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
2455 JDWP::ObjectId* result) {
2456 ScopedObjectAccessUnchecked soa(Thread::Current());
2457 Thread* thread;
2458 {
2459 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2460 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2461 if (error != JDWP::ERR_NONE) {
2462 return error;
2463 }
2464 if (!IsSuspendedForDebugger(soa, thread)) {
2465 return JDWP::ERR_THREAD_NOT_SUSPENDED;
2466 }
2467 }
2468 std::unique_ptr<Context> context(Context::Create());
2469 GetThisVisitor visitor(thread, context.get(), frame_id);
2470 visitor.WalkStack();
2471 *result = gRegistry->Add(visitor.this_object);
2472 return JDWP::ERR_NONE;
2473 }
2474
2475 // Walks the stack until we find the frame with the given FrameId.
2476 class FindFrameVisitor FINAL : public StackVisitor {
2477 public:
FindFrameVisitor(Thread * thread,Context * context,JDWP::FrameId frame_id)2478 FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
2479 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2480 : StackVisitor(thread, context), frame_id_(frame_id), error_(JDWP::ERR_INVALID_FRAMEID) {}
2481
2482 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2483 // annotalysis.
VisitFrame()2484 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2485 if (GetFrameId() != frame_id_) {
2486 return true; // Not our frame, carry on.
2487 }
2488 mirror::ArtMethod* m = GetMethod();
2489 if (m->IsNative()) {
2490 // We can't read/write local value from/into native method.
2491 error_ = JDWP::ERR_OPAQUE_FRAME;
2492 } else {
2493 // We found our frame.
2494 error_ = JDWP::ERR_NONE;
2495 }
2496 return false;
2497 }
2498
GetError() const2499 JDWP::JdwpError GetError() const {
2500 return error_;
2501 }
2502
2503 private:
2504 const JDWP::FrameId frame_id_;
2505 JDWP::JdwpError error_;
2506 };
2507
GetLocalValues(JDWP::Request * request,JDWP::ExpandBuf * pReply)2508 JDWP::JdwpError Dbg::GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply) {
2509 JDWP::ObjectId thread_id = request->ReadThreadId();
2510 JDWP::FrameId frame_id = request->ReadFrameId();
2511
2512 ScopedObjectAccessUnchecked soa(Thread::Current());
2513 Thread* thread;
2514 {
2515 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2516 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2517 if (error != JDWP::ERR_NONE) {
2518 return error;
2519 }
2520 }
2521 // Find the frame with the given frame_id.
2522 std::unique_ptr<Context> context(Context::Create());
2523 FindFrameVisitor visitor(thread, context.get(), frame_id);
2524 visitor.WalkStack();
2525 if (visitor.GetError() != JDWP::ERR_NONE) {
2526 return visitor.GetError();
2527 }
2528
2529 // Read the values from visitor's context.
2530 int32_t slot_count = request->ReadSigned32("slot count");
2531 expandBufAdd4BE(pReply, slot_count); /* "int values" */
2532 for (int32_t i = 0; i < slot_count; ++i) {
2533 uint32_t slot = request->ReadUnsigned32("slot");
2534 JDWP::JdwpTag reqSigByte = request->ReadTag();
2535
2536 VLOG(jdwp) << " --> slot " << slot << " " << reqSigByte;
2537
2538 size_t width = Dbg::GetTagWidth(reqSigByte);
2539 uint8_t* ptr = expandBufAddSpace(pReply, width+1);
2540 JDWP::JdwpError error = Dbg::GetLocalValue(visitor, soa, slot, reqSigByte, ptr, width);
2541 if (error != JDWP::ERR_NONE) {
2542 return error;
2543 }
2544 }
2545 return JDWP::ERR_NONE;
2546 }
2547
GetLocalValue(const StackVisitor & visitor,ScopedObjectAccessUnchecked & soa,int slot,JDWP::JdwpTag tag,uint8_t * buf,size_t width)2548 JDWP::JdwpError Dbg::GetLocalValue(const StackVisitor& visitor, ScopedObjectAccessUnchecked& soa,
2549 int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
2550 mirror::ArtMethod* m = visitor.GetMethod();
2551 uint16_t reg = DemangleSlot(slot, m);
2552 // TODO: check that the tag is compatible with the actual type of the slot!
2553 // TODO: check slot is valid for this method or return INVALID_SLOT error.
2554 constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
2555 switch (tag) {
2556 case JDWP::JT_BOOLEAN: {
2557 CHECK_EQ(width, 1U);
2558 uint32_t intVal;
2559 if (visitor.GetVReg(m, reg, kIntVReg, &intVal)) {
2560 VLOG(jdwp) << "get boolean local " << reg << " = " << intVal;
2561 JDWP::Set1(buf + 1, intVal != 0);
2562 } else {
2563 VLOG(jdwp) << "failed to get boolean local " << reg;
2564 return kFailureErrorCode;
2565 }
2566 break;
2567 }
2568 case JDWP::JT_BYTE: {
2569 CHECK_EQ(width, 1U);
2570 uint32_t intVal;
2571 if (visitor.GetVReg(m, reg, kIntVReg, &intVal)) {
2572 VLOG(jdwp) << "get byte local " << reg << " = " << intVal;
2573 JDWP::Set1(buf + 1, intVal);
2574 } else {
2575 VLOG(jdwp) << "failed to get byte local " << reg;
2576 return kFailureErrorCode;
2577 }
2578 break;
2579 }
2580 case JDWP::JT_SHORT:
2581 case JDWP::JT_CHAR: {
2582 CHECK_EQ(width, 2U);
2583 uint32_t intVal;
2584 if (visitor.GetVReg(m, reg, kIntVReg, &intVal)) {
2585 VLOG(jdwp) << "get short/char local " << reg << " = " << intVal;
2586 JDWP::Set2BE(buf + 1, intVal);
2587 } else {
2588 VLOG(jdwp) << "failed to get short/char local " << reg;
2589 return kFailureErrorCode;
2590 }
2591 break;
2592 }
2593 case JDWP::JT_INT: {
2594 CHECK_EQ(width, 4U);
2595 uint32_t intVal;
2596 if (visitor.GetVReg(m, reg, kIntVReg, &intVal)) {
2597 VLOG(jdwp) << "get int local " << reg << " = " << intVal;
2598 JDWP::Set4BE(buf + 1, intVal);
2599 } else {
2600 VLOG(jdwp) << "failed to get int local " << reg;
2601 return kFailureErrorCode;
2602 }
2603 break;
2604 }
2605 case JDWP::JT_FLOAT: {
2606 CHECK_EQ(width, 4U);
2607 uint32_t intVal;
2608 if (visitor.GetVReg(m, reg, kFloatVReg, &intVal)) {
2609 VLOG(jdwp) << "get float local " << reg << " = " << intVal;
2610 JDWP::Set4BE(buf + 1, intVal);
2611 } else {
2612 VLOG(jdwp) << "failed to get float local " << reg;
2613 return kFailureErrorCode;
2614 }
2615 break;
2616 }
2617 case JDWP::JT_ARRAY:
2618 case JDWP::JT_CLASS_LOADER:
2619 case JDWP::JT_CLASS_OBJECT:
2620 case JDWP::JT_OBJECT:
2621 case JDWP::JT_STRING:
2622 case JDWP::JT_THREAD:
2623 case JDWP::JT_THREAD_GROUP: {
2624 CHECK_EQ(width, sizeof(JDWP::ObjectId));
2625 uint32_t intVal;
2626 if (visitor.GetVReg(m, reg, kReferenceVReg, &intVal)) {
2627 mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal);
2628 VLOG(jdwp) << "get " << tag << " object local " << reg << " = " << o;
2629 if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
2630 LOG(FATAL) << "Register " << reg << " expected to hold " << tag << " object: " << o;
2631 }
2632 tag = TagFromObject(soa, o);
2633 JDWP::SetObjectId(buf + 1, gRegistry->Add(o));
2634 } else {
2635 VLOG(jdwp) << "failed to get " << tag << " object local " << reg;
2636 return kFailureErrorCode;
2637 }
2638 break;
2639 }
2640 case JDWP::JT_DOUBLE: {
2641 CHECK_EQ(width, 8U);
2642 uint64_t longVal;
2643 if (visitor.GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
2644 VLOG(jdwp) << "get double local " << reg << " = " << longVal;
2645 JDWP::Set8BE(buf + 1, longVal);
2646 } else {
2647 VLOG(jdwp) << "failed to get double local " << reg;
2648 return kFailureErrorCode;
2649 }
2650 break;
2651 }
2652 case JDWP::JT_LONG: {
2653 CHECK_EQ(width, 8U);
2654 uint64_t longVal;
2655 if (visitor.GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &longVal)) {
2656 VLOG(jdwp) << "get long local " << reg << " = " << longVal;
2657 JDWP::Set8BE(buf + 1, longVal);
2658 } else {
2659 VLOG(jdwp) << "failed to get long local " << reg;
2660 return kFailureErrorCode;
2661 }
2662 break;
2663 }
2664 default:
2665 LOG(FATAL) << "Unknown tag " << tag;
2666 break;
2667 }
2668
2669 // Prepend tag, which may have been updated.
2670 JDWP::Set1(buf, tag);
2671 return JDWP::ERR_NONE;
2672 }
2673
SetLocalValues(JDWP::Request * request)2674 JDWP::JdwpError Dbg::SetLocalValues(JDWP::Request* request) {
2675 JDWP::ObjectId thread_id = request->ReadThreadId();
2676 JDWP::FrameId frame_id = request->ReadFrameId();
2677
2678 ScopedObjectAccessUnchecked soa(Thread::Current());
2679 Thread* thread;
2680 {
2681 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2682 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2683 if (error != JDWP::ERR_NONE) {
2684 return error;
2685 }
2686 }
2687 // Find the frame with the given frame_id.
2688 std::unique_ptr<Context> context(Context::Create());
2689 FindFrameVisitor visitor(thread, context.get(), frame_id);
2690 visitor.WalkStack();
2691 if (visitor.GetError() != JDWP::ERR_NONE) {
2692 return visitor.GetError();
2693 }
2694
2695 // Writes the values into visitor's context.
2696 int32_t slot_count = request->ReadSigned32("slot count");
2697 for (int32_t i = 0; i < slot_count; ++i) {
2698 uint32_t slot = request->ReadUnsigned32("slot");
2699 JDWP::JdwpTag sigByte = request->ReadTag();
2700 size_t width = Dbg::GetTagWidth(sigByte);
2701 uint64_t value = request->ReadValue(width);
2702
2703 VLOG(jdwp) << " --> slot " << slot << " " << sigByte << " " << value;
2704 JDWP::JdwpError error = Dbg::SetLocalValue(visitor, slot, sigByte, value, width);
2705 if (error != JDWP::ERR_NONE) {
2706 return error;
2707 }
2708 }
2709 return JDWP::ERR_NONE;
2710 }
2711
SetLocalValue(StackVisitor & visitor,int slot,JDWP::JdwpTag tag,uint64_t value,size_t width)2712 JDWP::JdwpError Dbg::SetLocalValue(StackVisitor& visitor, int slot, JDWP::JdwpTag tag,
2713 uint64_t value, size_t width) {
2714 mirror::ArtMethod* m = visitor.GetMethod();
2715 uint16_t reg = DemangleSlot(slot, m);
2716 // TODO: check that the tag is compatible with the actual type of the slot!
2717 // TODO: check slot is valid for this method or return INVALID_SLOT error.
2718 constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
2719 switch (tag) {
2720 case JDWP::JT_BOOLEAN:
2721 case JDWP::JT_BYTE:
2722 CHECK_EQ(width, 1U);
2723 if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(value), kIntVReg)) {
2724 VLOG(jdwp) << "failed to set boolean/byte local " << reg << " = "
2725 << static_cast<uint32_t>(value);
2726 return kFailureErrorCode;
2727 }
2728 break;
2729 case JDWP::JT_SHORT:
2730 case JDWP::JT_CHAR:
2731 CHECK_EQ(width, 2U);
2732 if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(value), kIntVReg)) {
2733 VLOG(jdwp) << "failed to set short/char local " << reg << " = "
2734 << static_cast<uint32_t>(value);
2735 return kFailureErrorCode;
2736 }
2737 break;
2738 case JDWP::JT_INT:
2739 CHECK_EQ(width, 4U);
2740 if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(value), kIntVReg)) {
2741 VLOG(jdwp) << "failed to set int local " << reg << " = "
2742 << static_cast<uint32_t>(value);
2743 return kFailureErrorCode;
2744 }
2745 break;
2746 case JDWP::JT_FLOAT:
2747 CHECK_EQ(width, 4U);
2748 if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(value), kFloatVReg)) {
2749 VLOG(jdwp) << "failed to set float local " << reg << " = "
2750 << static_cast<uint32_t>(value);
2751 return kFailureErrorCode;
2752 }
2753 break;
2754 case JDWP::JT_ARRAY:
2755 case JDWP::JT_CLASS_LOADER:
2756 case JDWP::JT_CLASS_OBJECT:
2757 case JDWP::JT_OBJECT:
2758 case JDWP::JT_STRING:
2759 case JDWP::JT_THREAD:
2760 case JDWP::JT_THREAD_GROUP: {
2761 CHECK_EQ(width, sizeof(JDWP::ObjectId));
2762 mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value));
2763 if (o == ObjectRegistry::kInvalidObject) {
2764 VLOG(jdwp) << tag << " object " << o << " is an invalid object";
2765 return JDWP::ERR_INVALID_OBJECT;
2766 } else if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
2767 kReferenceVReg)) {
2768 VLOG(jdwp) << "failed to set " << tag << " object local " << reg << " = " << o;
2769 return kFailureErrorCode;
2770 }
2771 break;
2772 }
2773 case JDWP::JT_DOUBLE: {
2774 CHECK_EQ(width, 8U);
2775 if (!visitor.SetVRegPair(m, reg, value, kDoubleLoVReg, kDoubleHiVReg)) {
2776 VLOG(jdwp) << "failed to set double local " << reg << " = " << value;
2777 return kFailureErrorCode;
2778 }
2779 break;
2780 }
2781 case JDWP::JT_LONG: {
2782 CHECK_EQ(width, 8U);
2783 if (!visitor.SetVRegPair(m, reg, value, kLongLoVReg, kLongHiVReg)) {
2784 VLOG(jdwp) << "failed to set double local " << reg << " = " << value;
2785 return kFailureErrorCode;
2786 }
2787 break;
2788 }
2789 default:
2790 LOG(FATAL) << "Unknown tag " << tag;
2791 break;
2792 }
2793 return JDWP::ERR_NONE;
2794 }
2795
SetEventLocation(JDWP::EventLocation * location,mirror::ArtMethod * m,uint32_t dex_pc)2796 static void SetEventLocation(JDWP::EventLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
2797 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2798 DCHECK(location != nullptr);
2799 if (m == nullptr) {
2800 memset(location, 0, sizeof(*location));
2801 } else {
2802 location->method = m;
2803 location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint32_t>(-1) : dex_pc;
2804 }
2805 }
2806
PostLocationEvent(mirror::ArtMethod * m,int dex_pc,mirror::Object * this_object,int event_flags,const JValue * return_value)2807 void Dbg::PostLocationEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object,
2808 int event_flags, const JValue* return_value) {
2809 if (!IsDebuggerActive()) {
2810 return;
2811 }
2812 DCHECK(m != nullptr);
2813 DCHECK_EQ(m->IsStatic(), this_object == nullptr);
2814 JDWP::EventLocation location;
2815 SetEventLocation(&location, m, dex_pc);
2816
2817 gJdwpState->PostLocationEvent(&location, this_object, event_flags, return_value);
2818 }
2819
PostFieldAccessEvent(mirror::ArtMethod * m,int dex_pc,mirror::Object * this_object,mirror::ArtField * f)2820 void Dbg::PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc,
2821 mirror::Object* this_object, mirror::ArtField* f) {
2822 if (!IsDebuggerActive()) {
2823 return;
2824 }
2825 DCHECK(m != nullptr);
2826 DCHECK(f != nullptr);
2827 JDWP::EventLocation location;
2828 SetEventLocation(&location, m, dex_pc);
2829
2830 gJdwpState->PostFieldEvent(&location, f, this_object, nullptr, false);
2831 }
2832
PostFieldModificationEvent(mirror::ArtMethod * m,int dex_pc,mirror::Object * this_object,mirror::ArtField * f,const JValue * field_value)2833 void Dbg::PostFieldModificationEvent(mirror::ArtMethod* m, int dex_pc,
2834 mirror::Object* this_object, mirror::ArtField* f,
2835 const JValue* field_value) {
2836 if (!IsDebuggerActive()) {
2837 return;
2838 }
2839 DCHECK(m != nullptr);
2840 DCHECK(f != nullptr);
2841 DCHECK(field_value != nullptr);
2842 JDWP::EventLocation location;
2843 SetEventLocation(&location, m, dex_pc);
2844
2845 gJdwpState->PostFieldEvent(&location, f, this_object, field_value, true);
2846 }
2847
PostException(const ThrowLocation & throw_location,mirror::ArtMethod * catch_method,uint32_t catch_dex_pc,mirror::Throwable * exception_object)2848 void Dbg::PostException(const ThrowLocation& throw_location,
2849 mirror::ArtMethod* catch_method,
2850 uint32_t catch_dex_pc, mirror::Throwable* exception_object) {
2851 if (!IsDebuggerActive()) {
2852 return;
2853 }
2854 JDWP::EventLocation exception_throw_location;
2855 SetEventLocation(&exception_throw_location, throw_location.GetMethod(), throw_location.GetDexPc());
2856 JDWP::EventLocation exception_catch_location;
2857 SetEventLocation(&exception_catch_location, catch_method, catch_dex_pc);
2858
2859 gJdwpState->PostException(&exception_throw_location, exception_object, &exception_catch_location,
2860 throw_location.GetThis());
2861 }
2862
PostClassPrepare(mirror::Class * c)2863 void Dbg::PostClassPrepare(mirror::Class* c) {
2864 if (!IsDebuggerActive()) {
2865 return;
2866 }
2867 gJdwpState->PostClassPrepare(c);
2868 }
2869
UpdateDebugger(Thread * thread,mirror::Object * this_object,mirror::ArtMethod * m,uint32_t dex_pc,int event_flags,const JValue * return_value)2870 void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
2871 mirror::ArtMethod* m, uint32_t dex_pc,
2872 int event_flags, const JValue* return_value) {
2873 if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) {
2874 return;
2875 }
2876
2877 if (IsBreakpoint(m, dex_pc)) {
2878 event_flags |= kBreakpoint;
2879 }
2880
2881 // If the debugger is single-stepping one of our threads, check to
2882 // see if we're that thread and we've reached a step point.
2883 const SingleStepControl* single_step_control = thread->GetSingleStepControl();
2884 DCHECK(single_step_control != nullptr);
2885 if (single_step_control->is_active) {
2886 CHECK(!m->IsNative());
2887 if (single_step_control->step_depth == JDWP::SD_INTO) {
2888 // Step into method calls. We break when the line number
2889 // or method pointer changes. If we're in SS_MIN mode, we
2890 // always stop.
2891 if (single_step_control->method != m) {
2892 event_flags |= kSingleStep;
2893 VLOG(jdwp) << "SS new method";
2894 } else if (single_step_control->step_size == JDWP::SS_MIN) {
2895 event_flags |= kSingleStep;
2896 VLOG(jdwp) << "SS new instruction";
2897 } else if (single_step_control->ContainsDexPc(dex_pc)) {
2898 event_flags |= kSingleStep;
2899 VLOG(jdwp) << "SS new line";
2900 }
2901 } else if (single_step_control->step_depth == JDWP::SD_OVER) {
2902 // Step over method calls. We break when the line number is
2903 // different and the frame depth is <= the original frame
2904 // depth. (We can't just compare on the method, because we
2905 // might get unrolled past it by an exception, and it's tricky
2906 // to identify recursion.)
2907
2908 int stack_depth = GetStackDepth(thread);
2909
2910 if (stack_depth < single_step_control->stack_depth) {
2911 // Popped up one or more frames, always trigger.
2912 event_flags |= kSingleStep;
2913 VLOG(jdwp) << "SS method pop";
2914 } else if (stack_depth == single_step_control->stack_depth) {
2915 // Same depth, see if we moved.
2916 if (single_step_control->step_size == JDWP::SS_MIN) {
2917 event_flags |= kSingleStep;
2918 VLOG(jdwp) << "SS new instruction";
2919 } else if (single_step_control->ContainsDexPc(dex_pc)) {
2920 event_flags |= kSingleStep;
2921 VLOG(jdwp) << "SS new line";
2922 }
2923 }
2924 } else {
2925 CHECK_EQ(single_step_control->step_depth, JDWP::SD_OUT);
2926 // Return from the current method. We break when the frame
2927 // depth pops up.
2928
2929 // This differs from the "method exit" break in that it stops
2930 // with the PC at the next instruction in the returned-to
2931 // function, rather than the end of the returning function.
2932
2933 int stack_depth = GetStackDepth(thread);
2934 if (stack_depth < single_step_control->stack_depth) {
2935 event_flags |= kSingleStep;
2936 VLOG(jdwp) << "SS method pop";
2937 }
2938 }
2939 }
2940
2941 // If there's something interesting going on, see if it matches one
2942 // of the debugger filters.
2943 if (event_flags != 0) {
2944 Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, return_value);
2945 }
2946 }
2947
GetReferenceCounterForEvent(uint32_t instrumentation_event)2948 size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) {
2949 switch (instrumentation_event) {
2950 case instrumentation::Instrumentation::kMethodEntered:
2951 return &method_enter_event_ref_count_;
2952 case instrumentation::Instrumentation::kMethodExited:
2953 return &method_exit_event_ref_count_;
2954 case instrumentation::Instrumentation::kDexPcMoved:
2955 return &dex_pc_change_event_ref_count_;
2956 case instrumentation::Instrumentation::kFieldRead:
2957 return &field_read_event_ref_count_;
2958 case instrumentation::Instrumentation::kFieldWritten:
2959 return &field_write_event_ref_count_;
2960 case instrumentation::Instrumentation::kExceptionCaught:
2961 return &exception_catch_event_ref_count_;
2962 default:
2963 return nullptr;
2964 }
2965 }
2966
2967 // Process request while all mutator threads are suspended.
ProcessDeoptimizationRequest(const DeoptimizationRequest & request)2968 void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
2969 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
2970 switch (request.GetKind()) {
2971 case DeoptimizationRequest::kNothing:
2972 LOG(WARNING) << "Ignoring empty deoptimization request.";
2973 break;
2974 case DeoptimizationRequest::kRegisterForEvent:
2975 VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x",
2976 request.InstrumentationEvent());
2977 instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent());
2978 instrumentation_events_ |= request.InstrumentationEvent();
2979 break;
2980 case DeoptimizationRequest::kUnregisterForEvent:
2981 VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x",
2982 request.InstrumentationEvent());
2983 instrumentation->RemoveListener(&gDebugInstrumentationListener,
2984 request.InstrumentationEvent());
2985 instrumentation_events_ &= ~request.InstrumentationEvent();
2986 break;
2987 case DeoptimizationRequest::kFullDeoptimization:
2988 VLOG(jdwp) << "Deoptimize the world ...";
2989 instrumentation->DeoptimizeEverything();
2990 VLOG(jdwp) << "Deoptimize the world DONE";
2991 break;
2992 case DeoptimizationRequest::kFullUndeoptimization:
2993 VLOG(jdwp) << "Undeoptimize the world ...";
2994 instrumentation->UndeoptimizeEverything();
2995 VLOG(jdwp) << "Undeoptimize the world DONE";
2996 break;
2997 case DeoptimizationRequest::kSelectiveDeoptimization:
2998 VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " ...";
2999 instrumentation->Deoptimize(request.Method());
3000 VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " DONE";
3001 break;
3002 case DeoptimizationRequest::kSelectiveUndeoptimization:
3003 VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " ...";
3004 instrumentation->Undeoptimize(request.Method());
3005 VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " DONE";
3006 break;
3007 default:
3008 LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind();
3009 break;
3010 }
3011 }
3012
DelayFullUndeoptimization()3013 void Dbg::DelayFullUndeoptimization() {
3014 if (RequiresDeoptimization()) {
3015 MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
3016 ++delayed_full_undeoptimization_count_;
3017 DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_);
3018 }
3019 }
3020
ProcessDelayedFullUndeoptimizations()3021 void Dbg::ProcessDelayedFullUndeoptimizations() {
3022 // TODO: avoid taking the lock twice (once here and once in ManageDeoptimization).
3023 {
3024 MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
3025 while (delayed_full_undeoptimization_count_ > 0) {
3026 DeoptimizationRequest req;
3027 req.SetKind(DeoptimizationRequest::kFullUndeoptimization);
3028 req.SetMethod(nullptr);
3029 RequestDeoptimizationLocked(req);
3030 --delayed_full_undeoptimization_count_;
3031 }
3032 }
3033 ManageDeoptimization();
3034 }
3035
RequestDeoptimization(const DeoptimizationRequest & req)3036 void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
3037 if (req.GetKind() == DeoptimizationRequest::kNothing) {
3038 // Nothing to do.
3039 return;
3040 }
3041 MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
3042 RequestDeoptimizationLocked(req);
3043 }
3044
RequestDeoptimizationLocked(const DeoptimizationRequest & req)3045 void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
3046 switch (req.GetKind()) {
3047 case DeoptimizationRequest::kRegisterForEvent: {
3048 DCHECK_NE(req.InstrumentationEvent(), 0u);
3049 size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
3050 CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
3051 req.InstrumentationEvent());
3052 if (*counter == 0) {
3053 VLOG(jdwp) << StringPrintf("Queue request #%zd to start listening to instrumentation event 0x%x",
3054 deoptimization_requests_.size(), req.InstrumentationEvent());
3055 deoptimization_requests_.push_back(req);
3056 }
3057 *counter = *counter + 1;
3058 break;
3059 }
3060 case DeoptimizationRequest::kUnregisterForEvent: {
3061 DCHECK_NE(req.InstrumentationEvent(), 0u);
3062 size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
3063 CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
3064 req.InstrumentationEvent());
3065 *counter = *counter - 1;
3066 if (*counter == 0) {
3067 VLOG(jdwp) << StringPrintf("Queue request #%zd to stop listening to instrumentation event 0x%x",
3068 deoptimization_requests_.size(), req.InstrumentationEvent());
3069 deoptimization_requests_.push_back(req);
3070 }
3071 break;
3072 }
3073 case DeoptimizationRequest::kFullDeoptimization: {
3074 DCHECK(req.Method() == nullptr);
3075 if (full_deoptimization_event_count_ == 0) {
3076 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3077 << " for full deoptimization";
3078 deoptimization_requests_.push_back(req);
3079 }
3080 ++full_deoptimization_event_count_;
3081 break;
3082 }
3083 case DeoptimizationRequest::kFullUndeoptimization: {
3084 DCHECK(req.Method() == nullptr);
3085 DCHECK_GT(full_deoptimization_event_count_, 0U);
3086 --full_deoptimization_event_count_;
3087 if (full_deoptimization_event_count_ == 0) {
3088 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3089 << " for full undeoptimization";
3090 deoptimization_requests_.push_back(req);
3091 }
3092 break;
3093 }
3094 case DeoptimizationRequest::kSelectiveDeoptimization: {
3095 DCHECK(req.Method() != nullptr);
3096 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3097 << " for deoptimization of " << PrettyMethod(req.Method());
3098 deoptimization_requests_.push_back(req);
3099 break;
3100 }
3101 case DeoptimizationRequest::kSelectiveUndeoptimization: {
3102 DCHECK(req.Method() != nullptr);
3103 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3104 << " for undeoptimization of " << PrettyMethod(req.Method());
3105 deoptimization_requests_.push_back(req);
3106 break;
3107 }
3108 default: {
3109 LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind();
3110 break;
3111 }
3112 }
3113 }
3114
ManageDeoptimization()3115 void Dbg::ManageDeoptimization() {
3116 Thread* const self = Thread::Current();
3117 {
3118 // Avoid suspend/resume if there is no pending request.
3119 MutexLock mu(self, *Locks::deoptimization_lock_);
3120 if (deoptimization_requests_.empty()) {
3121 return;
3122 }
3123 }
3124 CHECK_EQ(self->GetState(), kRunnable);
3125 self->TransitionFromRunnableToSuspended(kWaitingForDeoptimization);
3126 // We need to suspend mutator threads first.
3127 Runtime* const runtime = Runtime::Current();
3128 runtime->GetThreadList()->SuspendAll();
3129 const ThreadState old_state = self->SetStateUnsafe(kRunnable);
3130 {
3131 MutexLock mu(self, *Locks::deoptimization_lock_);
3132 size_t req_index = 0;
3133 for (DeoptimizationRequest& request : deoptimization_requests_) {
3134 VLOG(jdwp) << "Process deoptimization request #" << req_index++;
3135 ProcessDeoptimizationRequest(request);
3136 }
3137 deoptimization_requests_.clear();
3138 }
3139 CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
3140 runtime->GetThreadList()->ResumeAll();
3141 self->TransitionFromSuspendedToRunnable();
3142 }
3143
IsMethodPossiblyInlined(Thread * self,mirror::ArtMethod * m)3144 static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m)
3145 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
3146 const DexFile::CodeItem* code_item = m->GetCodeItem();
3147 if (code_item == nullptr) {
3148 // TODO We should not be asked to watch location in a native or abstract method so the code item
3149 // should never be null. We could just check we never encounter this case.
3150 return false;
3151 }
3152 // Note: method verifier may cause thread suspension.
3153 self->AssertThreadSuspensionIsAllowable();
3154 StackHandleScope<2> hs(self);
3155 mirror::Class* declaring_class = m->GetDeclaringClass();
3156 Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
3157 Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
3158 verifier::MethodVerifier verifier(dex_cache->GetDexFile(), &dex_cache, &class_loader,
3159 &m->GetClassDef(), code_item, m->GetDexMethodIndex(), m,
3160 m->GetAccessFlags(), false, true, false);
3161 // Note: we don't need to verify the method.
3162 return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr);
3163 }
3164
FindFirstBreakpointForMethod(mirror::ArtMethod * m)3165 static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m)
3166 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3167 for (Breakpoint& breakpoint : gBreakpoints) {
3168 if (breakpoint.Method() == m) {
3169 return &breakpoint;
3170 }
3171 }
3172 return nullptr;
3173 }
3174
3175 // Sanity checks all existing breakpoints on the same method.
SanityCheckExistingBreakpoints(mirror::ArtMethod * m,DeoptimizationRequest::Kind deoptimization_kind)3176 static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m,
3177 DeoptimizationRequest::Kind deoptimization_kind)
3178 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3179 for (const Breakpoint& breakpoint : gBreakpoints) {
3180 if (breakpoint.Method() == m) {
3181 CHECK_EQ(deoptimization_kind, breakpoint.GetDeoptimizationKind());
3182 }
3183 }
3184 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
3185 if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
3186 // We should have deoptimized everything but not "selectively" deoptimized this method.
3187 CHECK(instrumentation->AreAllMethodsDeoptimized());
3188 CHECK(!instrumentation->IsDeoptimized(m));
3189 } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
3190 // We should have "selectively" deoptimized this method.
3191 // Note: while we have not deoptimized everything for this method, we may have done it for
3192 // another event.
3193 CHECK(instrumentation->IsDeoptimized(m));
3194 } else {
3195 // This method does not require deoptimization.
3196 CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
3197 CHECK(!instrumentation->IsDeoptimized(m));
3198 }
3199 }
3200
3201 // Returns the deoptimization kind required to set a breakpoint in a method.
3202 // If a breakpoint has already been set, we also return the first breakpoint
3203 // through the given 'existing_brkpt' pointer.
GetRequiredDeoptimizationKind(Thread * self,mirror::ArtMethod * m,const Breakpoint ** existing_brkpt)3204 static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self,
3205 mirror::ArtMethod* m,
3206 const Breakpoint** existing_brkpt)
3207 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
3208 if (!Dbg::RequiresDeoptimization()) {
3209 // We already run in interpreter-only mode so we don't need to deoptimize anything.
3210 VLOG(jdwp) << "No need for deoptimization when fully running with interpreter for method "
3211 << PrettyMethod(m);
3212 return DeoptimizationRequest::kNothing;
3213 }
3214 const Breakpoint* first_breakpoint;
3215 {
3216 ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
3217 first_breakpoint = FindFirstBreakpointForMethod(m);
3218 *existing_brkpt = first_breakpoint;
3219 }
3220
3221 if (first_breakpoint == nullptr) {
3222 // There is no breakpoint on this method yet: we need to deoptimize. If this method may be
3223 // inlined, we deoptimize everything; otherwise we deoptimize only this method.
3224 // Note: IsMethodPossiblyInlined goes into the method verifier and may cause thread suspension.
3225 // Therefore we must not hold any lock when we call it.
3226 bool need_full_deoptimization = IsMethodPossiblyInlined(self, m);
3227 if (need_full_deoptimization) {
3228 VLOG(jdwp) << "Need full deoptimization because of possible inlining of method "
3229 << PrettyMethod(m);
3230 return DeoptimizationRequest::kFullDeoptimization;
3231 } else {
3232 // We don't need to deoptimize if the method has not been compiled.
3233 ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
3234 const bool is_compiled = class_linker->GetOatMethodQuickCodeFor(m) != nullptr;
3235 if (is_compiled) {
3236 // If the method may be called through its direct code pointer (without loading
3237 // its updated entrypoint), we need full deoptimization to not miss the breakpoint.
3238 if (class_linker->MayBeCalledWithDirectCodePointer(m)) {
3239 VLOG(jdwp) << "Need full deoptimization because of possible direct code call "
3240 << "into image for compiled method " << PrettyMethod(m);
3241 return DeoptimizationRequest::kFullDeoptimization;
3242 } else {
3243 VLOG(jdwp) << "Need selective deoptimization for compiled method " << PrettyMethod(m);
3244 return DeoptimizationRequest::kSelectiveDeoptimization;
3245 }
3246 } else {
3247 // Method is not compiled: we don't need to deoptimize.
3248 VLOG(jdwp) << "No need for deoptimization for non-compiled method " << PrettyMethod(m);
3249 return DeoptimizationRequest::kNothing;
3250 }
3251 }
3252 } else {
3253 // There is at least one breakpoint for this method: we don't need to deoptimize.
3254 // Let's check that all breakpoints are configured the same way for deoptimization.
3255 VLOG(jdwp) << "Breakpoint already set: no deoptimization is required";
3256 DeoptimizationRequest::Kind deoptimization_kind = first_breakpoint->GetDeoptimizationKind();
3257 if (kIsDebugBuild) {
3258 ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
3259 SanityCheckExistingBreakpoints(m, deoptimization_kind);
3260 }
3261 return DeoptimizationRequest::kNothing;
3262 }
3263 }
3264
3265 // Installs a breakpoint at the specified location. Also indicates through the deoptimization
3266 // request if we need to deoptimize.
WatchLocation(const JDWP::JdwpLocation * location,DeoptimizationRequest * req)3267 void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3268 Thread* const self = Thread::Current();
3269 mirror::ArtMethod* m = FromMethodId(location->method_id);
3270 DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3271
3272 const Breakpoint* existing_breakpoint = nullptr;
3273 const DeoptimizationRequest::Kind deoptimization_kind =
3274 GetRequiredDeoptimizationKind(self, m, &existing_breakpoint);
3275 req->SetKind(deoptimization_kind);
3276 if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
3277 req->SetMethod(m);
3278 } else {
3279 CHECK(deoptimization_kind == DeoptimizationRequest::kNothing ||
3280 deoptimization_kind == DeoptimizationRequest::kFullDeoptimization);
3281 req->SetMethod(nullptr);
3282 }
3283
3284 {
3285 WriterMutexLock mu(self, *Locks::breakpoint_lock_);
3286 // If there is at least one existing breakpoint on the same method, the new breakpoint
3287 // must have the same deoptimization kind than the existing breakpoint(s).
3288 DeoptimizationRequest::Kind breakpoint_deoptimization_kind;
3289 if (existing_breakpoint != nullptr) {
3290 breakpoint_deoptimization_kind = existing_breakpoint->GetDeoptimizationKind();
3291 } else {
3292 breakpoint_deoptimization_kind = deoptimization_kind;
3293 }
3294 gBreakpoints.push_back(Breakpoint(m, location->dex_pc, breakpoint_deoptimization_kind));
3295 VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
3296 << gBreakpoints[gBreakpoints.size() - 1];
3297 }
3298 }
3299
3300 // Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
3301 // request if we need to undeoptimize.
UnwatchLocation(const JDWP::JdwpLocation * location,DeoptimizationRequest * req)3302 void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3303 WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
3304 mirror::ArtMethod* m = FromMethodId(location->method_id);
3305 DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3306 DeoptimizationRequest::Kind deoptimization_kind = DeoptimizationRequest::kNothing;
3307 for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
3308 if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].Method() == m) {
3309 VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
3310 deoptimization_kind = gBreakpoints[i].GetDeoptimizationKind();
3311 DCHECK_EQ(deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization,
3312 Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3313 gBreakpoints.erase(gBreakpoints.begin() + i);
3314 break;
3315 }
3316 }
3317 const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
3318 if (existing_breakpoint == nullptr) {
3319 // There is no more breakpoint on this method: we need to undeoptimize.
3320 if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
3321 // This method required full deoptimization: we need to undeoptimize everything.
3322 req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
3323 req->SetMethod(nullptr);
3324 } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
3325 // This method required selective deoptimization: we need to undeoptimize only that method.
3326 req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
3327 req->SetMethod(m);
3328 } else {
3329 // This method had no need for deoptimization: do nothing.
3330 CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
3331 req->SetKind(DeoptimizationRequest::kNothing);
3332 req->SetMethod(nullptr);
3333 }
3334 } else {
3335 // There is at least one breakpoint for this method: we don't need to undeoptimize.
3336 req->SetKind(DeoptimizationRequest::kNothing);
3337 req->SetMethod(nullptr);
3338 if (kIsDebugBuild) {
3339 SanityCheckExistingBreakpoints(m, deoptimization_kind);
3340 }
3341 }
3342 }
3343
3344 // Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
3345 // cause suspension if the thread is the current thread.
3346 class ScopedThreadSuspension {
3347 public:
ScopedThreadSuspension(Thread * self,JDWP::ObjectId thread_id)3348 ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
3349 LOCKS_EXCLUDED(Locks::thread_list_lock_)
3350 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
3351 thread_(nullptr),
3352 error_(JDWP::ERR_NONE),
3353 self_suspend_(false),
3354 other_suspend_(false) {
3355 ScopedObjectAccessUnchecked soa(self);
3356 {
3357 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3358 error_ = DecodeThread(soa, thread_id, thread_);
3359 }
3360 if (error_ == JDWP::ERR_NONE) {
3361 if (thread_ == soa.Self()) {
3362 self_suspend_ = true;
3363 } else {
3364 soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3365 jobject thread_peer = Dbg::GetObjectRegistry()->GetJObject(thread_id);
3366 bool timed_out;
3367 Thread* suspended_thread;
3368 {
3369 // Take suspend thread lock to avoid races with threads trying to suspend this one.
3370 MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
3371 ThreadList* thread_list = Runtime::Current()->GetThreadList();
3372 suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true, &timed_out);
3373 }
3374 CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
3375 if (suspended_thread == nullptr) {
3376 // Thread terminated from under us while suspending.
3377 error_ = JDWP::ERR_INVALID_THREAD;
3378 } else {
3379 CHECK_EQ(suspended_thread, thread_);
3380 other_suspend_ = true;
3381 }
3382 }
3383 }
3384 }
3385
GetThread() const3386 Thread* GetThread() const {
3387 return thread_;
3388 }
3389
GetError() const3390 JDWP::JdwpError GetError() const {
3391 return error_;
3392 }
3393
~ScopedThreadSuspension()3394 ~ScopedThreadSuspension() {
3395 if (other_suspend_) {
3396 Runtime::Current()->GetThreadList()->Resume(thread_, true);
3397 }
3398 }
3399
3400 private:
3401 Thread* thread_;
3402 JDWP::JdwpError error_;
3403 bool self_suspend_;
3404 bool other_suspend_;
3405 };
3406
ConfigureStep(JDWP::ObjectId thread_id,JDWP::JdwpStepSize step_size,JDWP::JdwpStepDepth step_depth)3407 JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size,
3408 JDWP::JdwpStepDepth step_depth) {
3409 Thread* self = Thread::Current();
3410 ScopedThreadSuspension sts(self, thread_id);
3411 if (sts.GetError() != JDWP::ERR_NONE) {
3412 return sts.GetError();
3413 }
3414
3415 //
3416 // Work out what Method* we're in, the current line number, and how deep the stack currently
3417 // is for step-out.
3418 //
3419
3420 struct SingleStepStackVisitor : public StackVisitor {
3421 explicit SingleStepStackVisitor(Thread* thread, SingleStepControl* single_step_control,
3422 int32_t* line_number)
3423 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
3424 : StackVisitor(thread, NULL), single_step_control_(single_step_control),
3425 line_number_(line_number) {
3426 DCHECK_EQ(single_step_control_, thread->GetSingleStepControl());
3427 single_step_control_->method = NULL;
3428 single_step_control_->stack_depth = 0;
3429 }
3430
3431 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
3432 // annotalysis.
3433 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
3434 mirror::ArtMethod* m = GetMethod();
3435 if (!m->IsRuntimeMethod()) {
3436 ++single_step_control_->stack_depth;
3437 if (single_step_control_->method == NULL) {
3438 mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
3439 single_step_control_->method = m;
3440 *line_number_ = -1;
3441 if (dex_cache != NULL) {
3442 const DexFile& dex_file = *dex_cache->GetDexFile();
3443 *line_number_ = dex_file.GetLineNumFromPC(m, GetDexPc());
3444 }
3445 }
3446 }
3447 return true;
3448 }
3449
3450 SingleStepControl* const single_step_control_;
3451 int32_t* const line_number_;
3452 };
3453
3454 Thread* const thread = sts.GetThread();
3455 SingleStepControl* const single_step_control = thread->GetSingleStepControl();
3456 DCHECK(single_step_control != nullptr);
3457 int32_t line_number = -1;
3458 SingleStepStackVisitor visitor(thread, single_step_control, &line_number);
3459 visitor.WalkStack();
3460
3461 //
3462 // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
3463 //
3464
3465 struct DebugCallbackContext {
3466 explicit DebugCallbackContext(SingleStepControl* single_step_control, int32_t line_number,
3467 const DexFile::CodeItem* code_item)
3468 : single_step_control_(single_step_control), line_number_(line_number), code_item_(code_item),
3469 last_pc_valid(false), last_pc(0) {
3470 }
3471
3472 static bool Callback(void* raw_context, uint32_t address, uint32_t line_number) {
3473 DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
3474 if (static_cast<int32_t>(line_number) == context->line_number_) {
3475 if (!context->last_pc_valid) {
3476 // Everything from this address until the next line change is ours.
3477 context->last_pc = address;
3478 context->last_pc_valid = true;
3479 }
3480 // Otherwise, if we're already in a valid range for this line,
3481 // just keep going (shouldn't really happen)...
3482 } else if (context->last_pc_valid) { // and the line number is new
3483 // Add everything from the last entry up until here to the set
3484 for (uint32_t dex_pc = context->last_pc; dex_pc < address; ++dex_pc) {
3485 context->single_step_control_->dex_pcs.insert(dex_pc);
3486 }
3487 context->last_pc_valid = false;
3488 }
3489 return false; // There may be multiple entries for any given line.
3490 }
3491
3492 ~DebugCallbackContext() {
3493 // If the line number was the last in the position table...
3494 if (last_pc_valid) {
3495 size_t end = code_item_->insns_size_in_code_units_;
3496 for (uint32_t dex_pc = last_pc; dex_pc < end; ++dex_pc) {
3497 single_step_control_->dex_pcs.insert(dex_pc);
3498 }
3499 }
3500 }
3501
3502 SingleStepControl* const single_step_control_;
3503 const int32_t line_number_;
3504 const DexFile::CodeItem* const code_item_;
3505 bool last_pc_valid;
3506 uint32_t last_pc;
3507 };
3508 single_step_control->dex_pcs.clear();
3509 mirror::ArtMethod* m = single_step_control->method;
3510 if (!m->IsNative()) {
3511 const DexFile::CodeItem* const code_item = m->GetCodeItem();
3512 DebugCallbackContext context(single_step_control, line_number, code_item);
3513 m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
3514 DebugCallbackContext::Callback, NULL, &context);
3515 }
3516
3517 //
3518 // Everything else...
3519 //
3520
3521 single_step_control->step_size = step_size;
3522 single_step_control->step_depth = step_depth;
3523 single_step_control->is_active = true;
3524
3525 if (VLOG_IS_ON(jdwp)) {
3526 VLOG(jdwp) << "Single-step thread: " << *thread;
3527 VLOG(jdwp) << "Single-step step size: " << single_step_control->step_size;
3528 VLOG(jdwp) << "Single-step step depth: " << single_step_control->step_depth;
3529 VLOG(jdwp) << "Single-step current method: " << PrettyMethod(single_step_control->method);
3530 VLOG(jdwp) << "Single-step current line: " << line_number;
3531 VLOG(jdwp) << "Single-step current stack depth: " << single_step_control->stack_depth;
3532 VLOG(jdwp) << "Single-step dex_pc values:";
3533 for (uint32_t dex_pc : single_step_control->dex_pcs) {
3534 VLOG(jdwp) << StringPrintf(" %#x", dex_pc);
3535 }
3536 }
3537
3538 return JDWP::ERR_NONE;
3539 }
3540
UnconfigureStep(JDWP::ObjectId thread_id)3541 void Dbg::UnconfigureStep(JDWP::ObjectId thread_id) {
3542 ScopedObjectAccessUnchecked soa(Thread::Current());
3543 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3544 Thread* thread;
3545 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
3546 if (error == JDWP::ERR_NONE) {
3547 SingleStepControl* single_step_control = thread->GetSingleStepControl();
3548 DCHECK(single_step_control != nullptr);
3549 single_step_control->Clear();
3550 }
3551 }
3552
JdwpTagToShortyChar(JDWP::JdwpTag tag)3553 static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
3554 switch (tag) {
3555 default:
3556 LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag);
3557
3558 // Primitives.
3559 case JDWP::JT_BYTE: return 'B';
3560 case JDWP::JT_CHAR: return 'C';
3561 case JDWP::JT_FLOAT: return 'F';
3562 case JDWP::JT_DOUBLE: return 'D';
3563 case JDWP::JT_INT: return 'I';
3564 case JDWP::JT_LONG: return 'J';
3565 case JDWP::JT_SHORT: return 'S';
3566 case JDWP::JT_VOID: return 'V';
3567 case JDWP::JT_BOOLEAN: return 'Z';
3568
3569 // Reference types.
3570 case JDWP::JT_ARRAY:
3571 case JDWP::JT_OBJECT:
3572 case JDWP::JT_STRING:
3573 case JDWP::JT_THREAD:
3574 case JDWP::JT_THREAD_GROUP:
3575 case JDWP::JT_CLASS_LOADER:
3576 case JDWP::JT_CLASS_OBJECT:
3577 return 'L';
3578 }
3579 }
3580
InvokeMethod(JDWP::ObjectId thread_id,JDWP::ObjectId object_id,JDWP::RefTypeId class_id,JDWP::MethodId method_id,uint32_t arg_count,uint64_t * arg_values,JDWP::JdwpTag * arg_types,uint32_t options,JDWP::JdwpTag * pResultTag,uint64_t * pResultValue,JDWP::ObjectId * pExceptionId)3581 JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id,
3582 JDWP::RefTypeId class_id, JDWP::MethodId method_id,
3583 uint32_t arg_count, uint64_t* arg_values,
3584 JDWP::JdwpTag* arg_types, uint32_t options,
3585 JDWP::JdwpTag* pResultTag, uint64_t* pResultValue,
3586 JDWP::ObjectId* pExceptionId) {
3587 ThreadList* thread_list = Runtime::Current()->GetThreadList();
3588
3589 Thread* targetThread = NULL;
3590 DebugInvokeReq* req = NULL;
3591 Thread* self = Thread::Current();
3592 {
3593 ScopedObjectAccessUnchecked soa(self);
3594 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3595 JDWP::JdwpError error = DecodeThread(soa, thread_id, targetThread);
3596 if (error != JDWP::ERR_NONE) {
3597 LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id;
3598 return error;
3599 }
3600 req = targetThread->GetInvokeReq();
3601 if (!req->ready) {
3602 LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread;
3603 return JDWP::ERR_INVALID_THREAD;
3604 }
3605
3606 /*
3607 * We currently have a bug where we don't successfully resume the
3608 * target thread if the suspend count is too deep. We're expected to
3609 * require one "resume" for each "suspend", but when asked to execute
3610 * a method we have to resume fully and then re-suspend it back to the
3611 * same level. (The easiest way to cause this is to type "suspend"
3612 * multiple times in jdb.)
3613 *
3614 * It's unclear what this means when the event specifies "resume all"
3615 * and some threads are suspended more deeply than others. This is
3616 * a rare problem, so for now we just prevent it from hanging forever
3617 * by rejecting the method invocation request. Without this, we will
3618 * be stuck waiting on a suspended thread.
3619 */
3620 int suspend_count;
3621 {
3622 MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
3623 suspend_count = targetThread->GetSuspendCount();
3624 }
3625 if (suspend_count > 1) {
3626 LOG(ERROR) << *targetThread << " suspend count too deep for method invocation: " << suspend_count;
3627 return JDWP::ERR_THREAD_SUSPENDED; // Probably not expected here.
3628 }
3629
3630 JDWP::JdwpError status;
3631 mirror::Object* receiver = gRegistry->Get<mirror::Object*>(object_id);
3632 if (receiver == ObjectRegistry::kInvalidObject) {
3633 return JDWP::ERR_INVALID_OBJECT;
3634 }
3635
3636 mirror::Object* thread = gRegistry->Get<mirror::Object*>(thread_id);
3637 if (thread == ObjectRegistry::kInvalidObject) {
3638 return JDWP::ERR_INVALID_OBJECT;
3639 }
3640 // TODO: check that 'thread' is actually a java.lang.Thread!
3641
3642 mirror::Class* c = DecodeClass(class_id, status);
3643 if (c == NULL) {
3644 return status;
3645 }
3646
3647 mirror::ArtMethod* m = FromMethodId(method_id);
3648 if (m->IsStatic() != (receiver == NULL)) {
3649 return JDWP::ERR_INVALID_METHODID;
3650 }
3651 if (m->IsStatic()) {
3652 if (m->GetDeclaringClass() != c) {
3653 return JDWP::ERR_INVALID_METHODID;
3654 }
3655 } else {
3656 if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
3657 return JDWP::ERR_INVALID_METHODID;
3658 }
3659 }
3660
3661 // Check the argument list matches the method.
3662 uint32_t shorty_len = 0;
3663 const char* shorty = m->GetShorty(&shorty_len);
3664 if (shorty_len - 1 != arg_count) {
3665 return JDWP::ERR_ILLEGAL_ARGUMENT;
3666 }
3667
3668 {
3669 StackHandleScope<3> hs(soa.Self());
3670 MethodHelper mh(hs.NewHandle(m));
3671 HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver));
3672 HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c));
3673 const DexFile::TypeList* types = m->GetParameterTypeList();
3674 for (size_t i = 0; i < arg_count; ++i) {
3675 if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) {
3676 return JDWP::ERR_ILLEGAL_ARGUMENT;
3677 }
3678
3679 if (shorty[i + 1] == 'L') {
3680 // Did we really get an argument of an appropriate reference type?
3681 mirror::Class* parameter_type = mh.GetClassFromTypeIdx(types->GetTypeItem(i).type_idx_);
3682 mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i]);
3683 if (argument == ObjectRegistry::kInvalidObject) {
3684 return JDWP::ERR_INVALID_OBJECT;
3685 }
3686 if (argument != NULL && !argument->InstanceOf(parameter_type)) {
3687 return JDWP::ERR_ILLEGAL_ARGUMENT;
3688 }
3689
3690 // Turn the on-the-wire ObjectId into a jobject.
3691 jvalue& v = reinterpret_cast<jvalue&>(arg_values[i]);
3692 v.l = gRegistry->GetJObject(arg_values[i]);
3693 }
3694 }
3695 // Update in case it moved.
3696 m = mh.GetMethod();
3697 }
3698
3699 req->receiver = receiver;
3700 req->thread = thread;
3701 req->klass = c;
3702 req->method = m;
3703 req->arg_count = arg_count;
3704 req->arg_values = arg_values;
3705 req->options = options;
3706 req->invoke_needed = true;
3707 }
3708
3709 // The fact that we've released the thread list lock is a bit risky --- if the thread goes
3710 // away we're sitting high and dry -- but we must release this before the ResumeAllThreads
3711 // call, and it's unwise to hold it during WaitForSuspend.
3712
3713 {
3714 /*
3715 * We change our (JDWP thread) status, which should be THREAD_RUNNING,
3716 * so we can suspend for a GC if the invoke request causes us to
3717 * run out of memory. It's also a good idea to change it before locking
3718 * the invokeReq mutex, although that should never be held for long.
3719 */
3720 self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend);
3721
3722 VLOG(jdwp) << " Transferring control to event thread";
3723 {
3724 MutexLock mu(self, req->lock);
3725
3726 if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3727 VLOG(jdwp) << " Resuming all threads";
3728 thread_list->UndoDebuggerSuspensions();
3729 } else {
3730 VLOG(jdwp) << " Resuming event thread only";
3731 thread_list->Resume(targetThread, true);
3732 }
3733
3734 // Wait for the request to finish executing.
3735 while (req->invoke_needed) {
3736 req->cond.Wait(self);
3737 }
3738 }
3739 VLOG(jdwp) << " Control has returned from event thread";
3740
3741 /* wait for thread to re-suspend itself */
3742 SuspendThread(thread_id, false /* request_suspension */);
3743 self->TransitionFromSuspendedToRunnable();
3744 }
3745
3746 /*
3747 * Suspend the threads. We waited for the target thread to suspend
3748 * itself, so all we need to do is suspend the others.
3749 *
3750 * The suspendAllThreads() call will double-suspend the event thread,
3751 * so we want to resume the target thread once to keep the books straight.
3752 */
3753 if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3754 self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3755 VLOG(jdwp) << " Suspending all threads";
3756 thread_list->SuspendAllForDebugger();
3757 self->TransitionFromSuspendedToRunnable();
3758 VLOG(jdwp) << " Resuming event thread to balance the count";
3759 thread_list->Resume(targetThread, true);
3760 }
3761
3762 // Copy the result.
3763 *pResultTag = req->result_tag;
3764 if (IsPrimitiveTag(req->result_tag)) {
3765 *pResultValue = req->result_value.GetJ();
3766 } else {
3767 *pResultValue = gRegistry->Add(req->result_value.GetL());
3768 }
3769 *pExceptionId = req->exception;
3770 return req->error;
3771 }
3772
ExecuteMethod(DebugInvokeReq * pReq)3773 void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
3774 ScopedObjectAccess soa(Thread::Current());
3775
3776 // We can be called while an exception is pending. We need
3777 // to preserve that across the method invocation.
3778 StackHandleScope<4> hs(soa.Self());
3779 auto old_throw_this_object = hs.NewHandle<mirror::Object>(nullptr);
3780 auto old_throw_method = hs.NewHandle<mirror::ArtMethod>(nullptr);
3781 auto old_exception = hs.NewHandle<mirror::Throwable>(nullptr);
3782 uint32_t old_throw_dex_pc;
3783 bool old_exception_report_flag;
3784 {
3785 ThrowLocation old_throw_location;
3786 mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location);
3787 old_throw_this_object.Assign(old_throw_location.GetThis());
3788 old_throw_method.Assign(old_throw_location.GetMethod());
3789 old_exception.Assign(old_exception_obj);
3790 old_throw_dex_pc = old_throw_location.GetDexPc();
3791 old_exception_report_flag = soa.Self()->IsExceptionReportedToInstrumentation();
3792 soa.Self()->ClearException();
3793 }
3794
3795 // Translate the method through the vtable, unless the debugger wants to suppress it.
3796 Handle<mirror::ArtMethod> m(hs.NewHandle(pReq->method));
3797 if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver != NULL) {
3798 mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(m.Get());
3799 if (actual_method != m.Get()) {
3800 VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.Get()) << " to " << PrettyMethod(actual_method);
3801 m.Assign(actual_method);
3802 }
3803 }
3804 VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m.Get())
3805 << " receiver=" << pReq->receiver
3806 << " arg_count=" << pReq->arg_count;
3807 CHECK(m.Get() != nullptr);
3808
3809 CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
3810
3811 pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(m.Get()),
3812 reinterpret_cast<jvalue*>(pReq->arg_values));
3813
3814 mirror::Throwable* exception = soa.Self()->GetException(NULL);
3815 soa.Self()->ClearException();
3816 pReq->exception = gRegistry->Add(exception);
3817 pReq->result_tag = BasicTagFromDescriptor(m.Get()->GetShorty());
3818 if (pReq->exception != 0) {
3819 VLOG(jdwp) << " JDWP invocation returning with exception=" << exception
3820 << " " << exception->Dump();
3821 pReq->result_value.SetJ(0);
3822 } else if (pReq->result_tag == JDWP::JT_OBJECT) {
3823 /* if no exception thrown, examine object result more closely */
3824 JDWP::JdwpTag new_tag = TagFromObject(soa, pReq->result_value.GetL());
3825 if (new_tag != pReq->result_tag) {
3826 VLOG(jdwp) << " JDWP promoted result from " << pReq->result_tag << " to " << new_tag;
3827 pReq->result_tag = new_tag;
3828 }
3829
3830 /*
3831 * Register the object. We don't actually need an ObjectId yet,
3832 * but we do need to be sure that the GC won't move or discard the
3833 * object when we switch out of RUNNING. The ObjectId conversion
3834 * will add the object to the "do not touch" list.
3835 *
3836 * We can't use the "tracked allocation" mechanism here because
3837 * the object is going to be handed off to a different thread.
3838 */
3839 gRegistry->Add(pReq->result_value.GetL());
3840 }
3841
3842 if (old_exception.Get() != NULL) {
3843 ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
3844 old_throw_dex_pc);
3845 soa.Self()->SetException(gc_safe_throw_location, old_exception.Get());
3846 soa.Self()->SetExceptionReportedToInstrumentation(old_exception_report_flag);
3847 }
3848 }
3849
3850 /*
3851 * "request" contains a full JDWP packet, possibly with multiple chunks. We
3852 * need to process each, accumulate the replies, and ship the whole thing
3853 * back.
3854 *
3855 * Returns "true" if we have a reply. The reply buffer is newly allocated,
3856 * and includes the chunk type/length, followed by the data.
3857 *
3858 * OLD-TODO: we currently assume that the request and reply include a single
3859 * chunk. If this becomes inconvenient we will need to adapt.
3860 */
DdmHandlePacket(JDWP::Request & request,uint8_t ** pReplyBuf,int * pReplyLen)3861 bool Dbg::DdmHandlePacket(JDWP::Request& request, uint8_t** pReplyBuf, int* pReplyLen) {
3862 Thread* self = Thread::Current();
3863 JNIEnv* env = self->GetJniEnv();
3864
3865 uint32_t type = request.ReadUnsigned32("type");
3866 uint32_t length = request.ReadUnsigned32("length");
3867
3868 // Create a byte[] corresponding to 'request'.
3869 size_t request_length = request.size();
3870 ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(request_length));
3871 if (dataArray.get() == NULL) {
3872 LOG(WARNING) << "byte[] allocation failed: " << request_length;
3873 env->ExceptionClear();
3874 return false;
3875 }
3876 env->SetByteArrayRegion(dataArray.get(), 0, request_length, reinterpret_cast<const jbyte*>(request.data()));
3877 request.Skip(request_length);
3878
3879 // Run through and find all chunks. [Currently just find the first.]
3880 ScopedByteArrayRO contents(env, dataArray.get());
3881 if (length != request_length) {
3882 LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%zd)", length, request_length);
3883 return false;
3884 }
3885
3886 // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)".
3887 ScopedLocalRef<jobject> chunk(env, env->CallStaticObjectMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
3888 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch,
3889 type, dataArray.get(), 0, length));
3890 if (env->ExceptionCheck()) {
3891 LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type);
3892 env->ExceptionDescribe();
3893 env->ExceptionClear();
3894 return false;
3895 }
3896
3897 if (chunk.get() == NULL) {
3898 return false;
3899 }
3900
3901 /*
3902 * Pull the pieces out of the chunk. We copy the results into a
3903 * newly-allocated buffer that the caller can free. We don't want to
3904 * continue using the Chunk object because nothing has a reference to it.
3905 *
3906 * We could avoid this by returning type/data/offset/length and having
3907 * the caller be aware of the object lifetime issues, but that
3908 * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work
3909 * if we have responses for multiple chunks.
3910 *
3911 * So we're pretty much stuck with copying data around multiple times.
3912 */
3913 ScopedLocalRef<jbyteArray> replyData(env, reinterpret_cast<jbyteArray>(env->GetObjectField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data)));
3914 jint offset = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset);
3915 length = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length);
3916 type = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type);
3917
3918 VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", type, replyData.get(), offset, length);
3919 if (length == 0 || replyData.get() == NULL) {
3920 return false;
3921 }
3922
3923 const int kChunkHdrLen = 8;
3924 uint8_t* reply = new uint8_t[length + kChunkHdrLen];
3925 if (reply == NULL) {
3926 LOG(WARNING) << "malloc failed: " << (length + kChunkHdrLen);
3927 return false;
3928 }
3929 JDWP::Set4BE(reply + 0, type);
3930 JDWP::Set4BE(reply + 4, length);
3931 env->GetByteArrayRegion(replyData.get(), offset, length, reinterpret_cast<jbyte*>(reply + kChunkHdrLen));
3932
3933 *pReplyBuf = reply;
3934 *pReplyLen = length + kChunkHdrLen;
3935
3936 VLOG(jdwp) << StringPrintf("dvmHandleDdm returning type=%.4s %p len=%d", reinterpret_cast<char*>(reply), reply, length);
3937 return true;
3938 }
3939
DdmBroadcast(bool connect)3940 void Dbg::DdmBroadcast(bool connect) {
3941 VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";
3942
3943 Thread* self = Thread::Current();
3944 if (self->GetState() != kRunnable) {
3945 LOG(ERROR) << "DDM broadcast in thread state " << self->GetState();
3946 /* try anyway? */
3947 }
3948
3949 JNIEnv* env = self->GetJniEnv();
3950 jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/;
3951 env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
3952 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast,
3953 event);
3954 if (env->ExceptionCheck()) {
3955 LOG(ERROR) << "DdmServer.broadcast " << event << " failed";
3956 env->ExceptionDescribe();
3957 env->ExceptionClear();
3958 }
3959 }
3960
DdmConnected()3961 void Dbg::DdmConnected() {
3962 Dbg::DdmBroadcast(true);
3963 }
3964
DdmDisconnected()3965 void Dbg::DdmDisconnected() {
3966 Dbg::DdmBroadcast(false);
3967 gDdmThreadNotification = false;
3968 }
3969
3970 /*
3971 * Send a notification when a thread starts, stops, or changes its name.
3972 *
3973 * Because we broadcast the full set of threads when the notifications are
3974 * first enabled, it's possible for "thread" to be actively executing.
3975 */
DdmSendThreadNotification(Thread * t,uint32_t type)3976 void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
3977 if (!gDdmThreadNotification) {
3978 return;
3979 }
3980
3981 if (type == CHUNK_TYPE("THDE")) {
3982 uint8_t buf[4];
3983 JDWP::Set4BE(&buf[0], t->GetThreadId());
3984 Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf);
3985 } else {
3986 CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
3987 ScopedObjectAccessUnchecked soa(Thread::Current());
3988 StackHandleScope<1> hs(soa.Self());
3989 Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa)));
3990 size_t char_count = (name.Get() != NULL) ? name->GetLength() : 0;
3991 const jchar* chars = (name.Get() != NULL) ? name->GetCharArray()->GetData() : NULL;
3992
3993 std::vector<uint8_t> bytes;
3994 JDWP::Append4BE(bytes, t->GetThreadId());
3995 JDWP::AppendUtf16BE(bytes, chars, char_count);
3996 CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2);
3997 Dbg::DdmSendChunk(type, bytes);
3998 }
3999 }
4000
DdmSetThreadNotification(bool enable)4001 void Dbg::DdmSetThreadNotification(bool enable) {
4002 // Enable/disable thread notifications.
4003 gDdmThreadNotification = enable;
4004 if (enable) {
4005 // Suspend the VM then post thread start notifications for all threads. Threads attaching will
4006 // see a suspension in progress and block until that ends. They then post their own start
4007 // notification.
4008 SuspendVM();
4009 std::list<Thread*> threads;
4010 Thread* self = Thread::Current();
4011 {
4012 MutexLock mu(self, *Locks::thread_list_lock_);
4013 threads = Runtime::Current()->GetThreadList()->GetList();
4014 }
4015 {
4016 ScopedObjectAccess soa(self);
4017 for (Thread* thread : threads) {
4018 Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
4019 }
4020 }
4021 ResumeVM();
4022 }
4023 }
4024
PostThreadStartOrStop(Thread * t,uint32_t type)4025 void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
4026 if (IsDebuggerActive()) {
4027 gJdwpState->PostThreadChange(t, type == CHUNK_TYPE("THCR"));
4028 }
4029 Dbg::DdmSendThreadNotification(t, type);
4030 }
4031
PostThreadStart(Thread * t)4032 void Dbg::PostThreadStart(Thread* t) {
4033 Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
4034 }
4035
PostThreadDeath(Thread * t)4036 void Dbg::PostThreadDeath(Thread* t) {
4037 Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
4038 }
4039
DdmSendChunk(uint32_t type,size_t byte_count,const uint8_t * buf)4040 void Dbg::DdmSendChunk(uint32_t type, size_t byte_count, const uint8_t* buf) {
4041 CHECK(buf != NULL);
4042 iovec vec[1];
4043 vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(buf));
4044 vec[0].iov_len = byte_count;
4045 Dbg::DdmSendChunkV(type, vec, 1);
4046 }
4047
DdmSendChunk(uint32_t type,const std::vector<uint8_t> & bytes)4048 void Dbg::DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) {
4049 DdmSendChunk(type, bytes.size(), &bytes[0]);
4050 }
4051
DdmSendChunkV(uint32_t type,const iovec * iov,int iov_count)4052 void Dbg::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) {
4053 if (gJdwpState == NULL) {
4054 VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
4055 } else {
4056 gJdwpState->DdmSendChunkV(type, iov, iov_count);
4057 }
4058 }
4059
DdmHandleHpifChunk(HpifWhen when)4060 int Dbg::DdmHandleHpifChunk(HpifWhen when) {
4061 if (when == HPIF_WHEN_NOW) {
4062 DdmSendHeapInfo(when);
4063 return true;
4064 }
4065
4066 if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
4067 LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
4068 return false;
4069 }
4070
4071 gDdmHpifWhen = when;
4072 return true;
4073 }
4074
DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when,Dbg::HpsgWhat what,bool native)4075 bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
4076 if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) {
4077 LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when);
4078 return false;
4079 }
4080
4081 if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) {
4082 LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what);
4083 return false;
4084 }
4085
4086 if (native) {
4087 gDdmNhsgWhen = when;
4088 gDdmNhsgWhat = what;
4089 } else {
4090 gDdmHpsgWhen = when;
4091 gDdmHpsgWhat = what;
4092 }
4093 return true;
4094 }
4095
DdmSendHeapInfo(HpifWhen reason)4096 void Dbg::DdmSendHeapInfo(HpifWhen reason) {
4097 // If there's a one-shot 'when', reset it.
4098 if (reason == gDdmHpifWhen) {
4099 if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
4100 gDdmHpifWhen = HPIF_WHEN_NEVER;
4101 }
4102 }
4103
4104 /*
4105 * Chunk HPIF (client --> server)
4106 *
4107 * Heap Info. General information about the heap,
4108 * suitable for a summary display.
4109 *
4110 * [u4]: number of heaps
4111 *
4112 * For each heap:
4113 * [u4]: heap ID
4114 * [u8]: timestamp in ms since Unix epoch
4115 * [u1]: capture reason (same as 'when' value from server)
4116 * [u4]: max heap size in bytes (-Xmx)
4117 * [u4]: current heap size in bytes
4118 * [u4]: current number of bytes allocated
4119 * [u4]: current number of objects allocated
4120 */
4121 uint8_t heap_count = 1;
4122 gc::Heap* heap = Runtime::Current()->GetHeap();
4123 std::vector<uint8_t> bytes;
4124 JDWP::Append4BE(bytes, heap_count);
4125 JDWP::Append4BE(bytes, 1); // Heap id (bogus; we only have one heap).
4126 JDWP::Append8BE(bytes, MilliTime());
4127 JDWP::Append1BE(bytes, reason);
4128 JDWP::Append4BE(bytes, heap->GetMaxMemory()); // Max allowed heap size in bytes.
4129 JDWP::Append4BE(bytes, heap->GetTotalMemory()); // Current heap size in bytes.
4130 JDWP::Append4BE(bytes, heap->GetBytesAllocated());
4131 JDWP::Append4BE(bytes, heap->GetObjectsAllocated());
4132 CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
4133 Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes);
4134 }
4135
4136 enum HpsgSolidity {
4137 SOLIDITY_FREE = 0,
4138 SOLIDITY_HARD = 1,
4139 SOLIDITY_SOFT = 2,
4140 SOLIDITY_WEAK = 3,
4141 SOLIDITY_PHANTOM = 4,
4142 SOLIDITY_FINALIZABLE = 5,
4143 SOLIDITY_SWEEP = 6,
4144 };
4145
4146 enum HpsgKind {
4147 KIND_OBJECT = 0,
4148 KIND_CLASS_OBJECT = 1,
4149 KIND_ARRAY_1 = 2,
4150 KIND_ARRAY_2 = 3,
4151 KIND_ARRAY_4 = 4,
4152 KIND_ARRAY_8 = 5,
4153 KIND_UNKNOWN = 6,
4154 KIND_NATIVE = 7,
4155 };
4156
4157 #define HPSG_PARTIAL (1<<7)
4158 #define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
4159
4160 class HeapChunkContext {
4161 public:
4162 // Maximum chunk size. Obtain this from the formula:
4163 // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
HeapChunkContext(bool merge,bool native)4164 HeapChunkContext(bool merge, bool native)
4165 : buf_(16384 - 16),
4166 type_(0),
4167 merge_(merge),
4168 chunk_overhead_(0) {
4169 Reset();
4170 if (native) {
4171 type_ = CHUNK_TYPE("NHSG");
4172 } else {
4173 type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
4174 }
4175 }
4176
~HeapChunkContext()4177 ~HeapChunkContext() {
4178 if (p_ > &buf_[0]) {
4179 Flush();
4180 }
4181 }
4182
SetChunkOverhead(size_t chunk_overhead)4183 void SetChunkOverhead(size_t chunk_overhead) {
4184 chunk_overhead_ = chunk_overhead;
4185 }
4186
ResetStartOfNextChunk()4187 void ResetStartOfNextChunk() {
4188 startOfNextMemoryChunk_ = nullptr;
4189 }
4190
EnsureHeader(const void * chunk_ptr)4191 void EnsureHeader(const void* chunk_ptr) {
4192 if (!needHeader_) {
4193 return;
4194 }
4195
4196 // Start a new HPSx chunk.
4197 JDWP::Write4BE(&p_, 1); // Heap id (bogus; we only have one heap).
4198 JDWP::Write1BE(&p_, 8); // Size of allocation unit, in bytes.
4199
4200 JDWP::Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr)); // virtual address of segment start.
4201 JDWP::Write4BE(&p_, 0); // offset of this piece (relative to the virtual address).
4202 // [u4]: length of piece, in allocation units
4203 // We won't know this until we're done, so save the offset and stuff in a dummy value.
4204 pieceLenField_ = p_;
4205 JDWP::Write4BE(&p_, 0x55555555);
4206 needHeader_ = false;
4207 }
4208
Flush()4209 void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4210 if (pieceLenField_ == NULL) {
4211 // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
4212 CHECK(needHeader_);
4213 return;
4214 }
4215 // Patch the "length of piece" field.
4216 CHECK_LE(&buf_[0], pieceLenField_);
4217 CHECK_LE(pieceLenField_, p_);
4218 JDWP::Set4BE(pieceLenField_, totalAllocationUnits_);
4219
4220 Dbg::DdmSendChunk(type_, p_ - &buf_[0], &buf_[0]);
4221 Reset();
4222 }
4223
HeapChunkCallback(void * start,void * end,size_t used_bytes,void * arg)4224 static void HeapChunkCallback(void* start, void* end, size_t used_bytes, void* arg)
4225 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
4226 Locks::mutator_lock_) {
4227 reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkCallback(start, end, used_bytes);
4228 }
4229
4230 private:
4231 enum { ALLOCATION_UNIT_SIZE = 8 };
4232
Reset()4233 void Reset() {
4234 p_ = &buf_[0];
4235 ResetStartOfNextChunk();
4236 totalAllocationUnits_ = 0;
4237 needHeader_ = true;
4238 pieceLenField_ = NULL;
4239 }
4240
HeapChunkCallback(void * start,void *,size_t used_bytes)4241 void HeapChunkCallback(void* start, void* /*end*/, size_t used_bytes)
4242 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
4243 Locks::mutator_lock_) {
4244 // Note: heap call backs cannot manipulate the heap upon which they are crawling, care is taken
4245 // in the following code not to allocate memory, by ensuring buf_ is of the correct size
4246 if (used_bytes == 0) {
4247 if (start == NULL) {
4248 // Reset for start of new heap.
4249 startOfNextMemoryChunk_ = NULL;
4250 Flush();
4251 }
4252 // Only process in use memory so that free region information
4253 // also includes dlmalloc book keeping.
4254 return;
4255 }
4256
4257 /* If we're looking at the native heap, we'll just return
4258 * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
4259 */
4260 bool native = type_ == CHUNK_TYPE("NHSG");
4261
4262 // TODO: I'm not sure using start of next chunk works well with multiple spaces. We shouldn't
4263 // count gaps inbetween spaces as free memory.
4264 if (startOfNextMemoryChunk_ != NULL) {
4265 // Transmit any pending free memory. Native free memory of
4266 // over kMaxFreeLen could be because of the use of mmaps, so
4267 // don't report. If not free memory then start a new segment.
4268 bool flush = true;
4269 if (start > startOfNextMemoryChunk_) {
4270 const size_t kMaxFreeLen = 2 * kPageSize;
4271 void* freeStart = startOfNextMemoryChunk_;
4272 void* freeEnd = start;
4273 size_t freeLen = reinterpret_cast<char*>(freeEnd) - reinterpret_cast<char*>(freeStart);
4274 if (!native || freeLen < kMaxFreeLen) {
4275 AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), freeStart, freeLen);
4276 flush = false;
4277 }
4278 }
4279 if (flush) {
4280 startOfNextMemoryChunk_ = NULL;
4281 Flush();
4282 }
4283 }
4284 mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
4285
4286 // Determine the type of this chunk.
4287 // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
4288 // If it's the same, we should combine them.
4289 uint8_t state = ExamineObject(obj, native);
4290 AppendChunk(state, start, used_bytes + chunk_overhead_);
4291 startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
4292 }
4293
AppendChunk(uint8_t state,void * ptr,size_t length)4294 void AppendChunk(uint8_t state, void* ptr, size_t length)
4295 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4296 // Make sure there's enough room left in the buffer.
4297 // We need to use two bytes for every fractional 256 allocation units used by the chunk plus
4298 // 17 bytes for any header.
4299 size_t needed = (((length/ALLOCATION_UNIT_SIZE + 255) / 256) * 2) + 17;
4300 size_t bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
4301 if (bytesLeft < needed) {
4302 Flush();
4303 }
4304
4305 bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
4306 if (bytesLeft < needed) {
4307 LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", "
4308 << needed << " bytes)";
4309 return;
4310 }
4311 EnsureHeader(ptr);
4312 // Write out the chunk description.
4313 length /= ALLOCATION_UNIT_SIZE; // Convert to allocation units.
4314 totalAllocationUnits_ += length;
4315 while (length > 256) {
4316 *p_++ = state | HPSG_PARTIAL;
4317 *p_++ = 255; // length - 1
4318 length -= 256;
4319 }
4320 *p_++ = state;
4321 *p_++ = length - 1;
4322 }
4323
ExamineObject(mirror::Object * o,bool is_native_heap)4324 uint8_t ExamineObject(mirror::Object* o, bool is_native_heap)
4325 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
4326 if (o == NULL) {
4327 return HPSG_STATE(SOLIDITY_FREE, 0);
4328 }
4329
4330 // It's an allocated chunk. Figure out what it is.
4331
4332 // If we're looking at the native heap, we'll just return
4333 // (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks.
4334 if (is_native_heap) {
4335 return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4336 }
4337
4338 if (!Runtime::Current()->GetHeap()->IsLiveObjectLocked(o)) {
4339 return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4340 }
4341
4342 mirror::Class* c = o->GetClass();
4343 if (c == NULL) {
4344 // The object was probably just created but hasn't been initialized yet.
4345 return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4346 }
4347
4348 if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
4349 LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
4350 return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
4351 }
4352
4353 if (c->GetClass() == nullptr) {
4354 LOG(ERROR) << "Null class of class " << c << " for object " << o;
4355 return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
4356 }
4357
4358 if (c->IsClassClass()) {
4359 return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
4360 }
4361
4362 if (c->IsArrayClass()) {
4363 if (o->IsObjectArray()) {
4364 return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
4365 }
4366 switch (c->GetComponentSize()) {
4367 case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
4368 case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
4369 case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
4370 case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
4371 }
4372 }
4373
4374 return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4375 }
4376
4377 std::vector<uint8_t> buf_;
4378 uint8_t* p_;
4379 uint8_t* pieceLenField_;
4380 void* startOfNextMemoryChunk_;
4381 size_t totalAllocationUnits_;
4382 uint32_t type_;
4383 bool merge_;
4384 bool needHeader_;
4385 size_t chunk_overhead_;
4386
4387 DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
4388 };
4389
BumpPointerSpaceCallback(mirror::Object * obj,void * arg)4390 static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
4391 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
4392 const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
4393 HeapChunkContext::HeapChunkCallback(
4394 obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
4395 }
4396
DdmSendHeapSegments(bool native)4397 void Dbg::DdmSendHeapSegments(bool native) {
4398 Dbg::HpsgWhen when;
4399 Dbg::HpsgWhat what;
4400 if (!native) {
4401 when = gDdmHpsgWhen;
4402 what = gDdmHpsgWhat;
4403 } else {
4404 when = gDdmNhsgWhen;
4405 what = gDdmNhsgWhat;
4406 }
4407 if (when == HPSG_WHEN_NEVER) {
4408 return;
4409 }
4410
4411 // Figure out what kind of chunks we'll be sending.
4412 CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS) << static_cast<int>(what);
4413
4414 // First, send a heap start chunk.
4415 uint8_t heap_id[4];
4416 JDWP::Set4BE(&heap_id[0], 1); // Heap id (bogus; we only have one heap).
4417 Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id);
4418
4419 Thread* self = Thread::Current();
4420
4421 Locks::mutator_lock_->AssertSharedHeld(self);
4422
4423 // Send a series of heap segment chunks.
4424 HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
4425 if (native) {
4426 #ifdef USE_DLMALLOC
4427 dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context);
4428 #else
4429 UNIMPLEMENTED(WARNING) << "Native heap inspection is only supported with dlmalloc";
4430 #endif
4431 } else {
4432 gc::Heap* heap = Runtime::Current()->GetHeap();
4433 for (const auto& space : heap->GetContinuousSpaces()) {
4434 if (space->IsDlMallocSpace()) {
4435 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4436 // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
4437 // allocation then the first sizeof(size_t) may belong to it.
4438 context.SetChunkOverhead(sizeof(size_t));
4439 space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
4440 } else if (space->IsRosAllocSpace()) {
4441 context.SetChunkOverhead(0);
4442 // Need to acquire the mutator lock before the heap bitmap lock with exclusive access since
4443 // RosAlloc's internal logic doesn't know to release and reacquire the heap bitmap lock.
4444 self->TransitionFromRunnableToSuspended(kSuspended);
4445 ThreadList* tl = Runtime::Current()->GetThreadList();
4446 tl->SuspendAll();
4447 {
4448 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4449 space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
4450 }
4451 tl->ResumeAll();
4452 self->TransitionFromSuspendedToRunnable();
4453 } else if (space->IsBumpPointerSpace()) {
4454 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4455 context.SetChunkOverhead(0);
4456 space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
4457 } else {
4458 UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
4459 }
4460 context.ResetStartOfNextChunk();
4461 }
4462 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4463 // Walk the large objects, these are not in the AllocSpace.
4464 context.SetChunkOverhead(0);
4465 heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
4466 }
4467
4468 // Finally, send a heap end chunk.
4469 Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
4470 }
4471
GetAllocTrackerMax()4472 static size_t GetAllocTrackerMax() {
4473 #ifdef HAVE_ANDROID_OS
4474 // Check whether there's a system property overriding the number of records.
4475 const char* propertyName = "dalvik.vm.allocTrackerMax";
4476 char allocRecordMaxString[PROPERTY_VALUE_MAX];
4477 if (property_get(propertyName, allocRecordMaxString, "") > 0) {
4478 char* end;
4479 size_t value = strtoul(allocRecordMaxString, &end, 10);
4480 if (*end != '\0') {
4481 LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
4482 << "' --- invalid";
4483 return kDefaultNumAllocRecords;
4484 }
4485 if (!IsPowerOfTwo(value)) {
4486 LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
4487 << "' --- not power of two";
4488 return kDefaultNumAllocRecords;
4489 }
4490 return value;
4491 }
4492 #endif
4493 return kDefaultNumAllocRecords;
4494 }
4495
SetAllocTrackingEnabled(bool enable)4496 void Dbg::SetAllocTrackingEnabled(bool enable) {
4497 Thread* self = Thread::Current();
4498 if (enable) {
4499 {
4500 MutexLock mu(self, *Locks::alloc_tracker_lock_);
4501 if (recent_allocation_records_ != NULL) {
4502 return; // Already enabled, bail.
4503 }
4504 alloc_record_max_ = GetAllocTrackerMax();
4505 LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of "
4506 << kMaxAllocRecordStackDepth << " frames, taking "
4507 << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")";
4508 DCHECK_EQ(alloc_record_head_, 0U);
4509 DCHECK_EQ(alloc_record_count_, 0U);
4510 recent_allocation_records_ = new AllocRecord[alloc_record_max_];
4511 CHECK(recent_allocation_records_ != NULL);
4512 }
4513 Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
4514 } else {
4515 {
4516 ScopedObjectAccess soa(self); // For type_cache_.Clear();
4517 MutexLock mu(self, *Locks::alloc_tracker_lock_);
4518 if (recent_allocation_records_ == NULL) {
4519 return; // Already disabled, bail.
4520 }
4521 LOG(INFO) << "Disabling alloc tracker";
4522 delete[] recent_allocation_records_;
4523 recent_allocation_records_ = NULL;
4524 alloc_record_head_ = 0;
4525 alloc_record_count_ = 0;
4526 type_cache_.Clear();
4527 }
4528 // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
4529 Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
4530 }
4531 }
4532
4533 struct AllocRecordStackVisitor : public StackVisitor {
AllocRecordStackVisitorart::AllocRecordStackVisitor4534 AllocRecordStackVisitor(Thread* thread, AllocRecord* record)
4535 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
4536 : StackVisitor(thread, NULL), record(record), depth(0) {}
4537
4538 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
4539 // annotalysis.
VisitFrameart::AllocRecordStackVisitor4540 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
4541 if (depth >= kMaxAllocRecordStackDepth) {
4542 return false;
4543 }
4544 mirror::ArtMethod* m = GetMethod();
4545 if (!m->IsRuntimeMethod()) {
4546 record->StackElement(depth)->SetMethod(m);
4547 record->StackElement(depth)->SetDexPc(GetDexPc());
4548 ++depth;
4549 }
4550 return true;
4551 }
4552
~AllocRecordStackVisitorart::AllocRecordStackVisitor4553 ~AllocRecordStackVisitor() {
4554 // Clear out any unused stack trace elements.
4555 for (; depth < kMaxAllocRecordStackDepth; ++depth) {
4556 record->StackElement(depth)->SetMethod(nullptr);
4557 record->StackElement(depth)->SetDexPc(0);
4558 }
4559 }
4560
4561 AllocRecord* record;
4562 size_t depth;
4563 };
4564
RecordAllocation(mirror::Class * type,size_t byte_count)4565 void Dbg::RecordAllocation(mirror::Class* type, size_t byte_count) {
4566 Thread* self = Thread::Current();
4567 CHECK(self != NULL);
4568
4569 MutexLock mu(self, *Locks::alloc_tracker_lock_);
4570 if (recent_allocation_records_ == NULL) {
4571 // In the process of shutting down recording, bail.
4572 return;
4573 }
4574
4575 // Advance and clip.
4576 if (++alloc_record_head_ == alloc_record_max_) {
4577 alloc_record_head_ = 0;
4578 }
4579
4580 // Fill in the basics.
4581 AllocRecord* record = &recent_allocation_records_[alloc_record_head_];
4582 record->SetType(type);
4583 record->SetByteCount(byte_count);
4584 record->SetThinLockId(self->GetThreadId());
4585
4586 // Fill in the stack trace.
4587 AllocRecordStackVisitor visitor(self, record);
4588 visitor.WalkStack();
4589
4590 if (alloc_record_count_ < alloc_record_max_) {
4591 ++alloc_record_count_;
4592 }
4593 }
4594
4595 // Returns the index of the head element.
4596 //
4597 // We point at the most-recently-written record, so if alloc_record_count_ is 1
4598 // we want to use the current element. Take "head+1" and subtract count
4599 // from it.
4600 //
4601 // We need to handle underflow in our circular buffer, so we add
4602 // alloc_record_max_ and then mask it back down.
HeadIndex()4603 size_t Dbg::HeadIndex() {
4604 return (Dbg::alloc_record_head_ + 1 + Dbg::alloc_record_max_ - Dbg::alloc_record_count_) &
4605 (Dbg::alloc_record_max_ - 1);
4606 }
4607
DumpRecentAllocations()4608 void Dbg::DumpRecentAllocations() {
4609 ScopedObjectAccess soa(Thread::Current());
4610 MutexLock mu(soa.Self(), *Locks::alloc_tracker_lock_);
4611 if (recent_allocation_records_ == NULL) {
4612 LOG(INFO) << "Not recording tracked allocations";
4613 return;
4614 }
4615
4616 // "i" is the head of the list. We want to start at the end of the
4617 // list and move forward to the tail.
4618 size_t i = HeadIndex();
4619 const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
4620 uint16_t count = capped_count;
4621
4622 LOG(INFO) << "Tracked allocations, (head=" << alloc_record_head_ << " count=" << count << ")";
4623 while (count--) {
4624 AllocRecord* record = &recent_allocation_records_[i];
4625
4626 LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->ThinLockId(), record->ByteCount())
4627 << PrettyClass(record->Type());
4628
4629 for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) {
4630 AllocRecordStackTraceElement* stack_element = record->StackElement(stack_frame);
4631 mirror::ArtMethod* m = stack_element->Method();
4632 if (m == NULL) {
4633 break;
4634 }
4635 LOG(INFO) << " " << PrettyMethod(m) << " line " << stack_element->LineNumber();
4636 }
4637
4638 // pause periodically to help logcat catch up
4639 if ((count % 5) == 0) {
4640 usleep(40000);
4641 }
4642
4643 i = (i + 1) & (alloc_record_max_ - 1);
4644 }
4645 }
4646
4647 class StringTable {
4648 public:
StringTable()4649 StringTable() {
4650 }
4651
Add(const std::string & str)4652 void Add(const std::string& str) {
4653 table_.insert(str);
4654 }
4655
Add(const char * str)4656 void Add(const char* str) {
4657 table_.insert(str);
4658 }
4659
IndexOf(const char * s) const4660 size_t IndexOf(const char* s) const {
4661 auto it = table_.find(s);
4662 if (it == table_.end()) {
4663 LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
4664 }
4665 return std::distance(table_.begin(), it);
4666 }
4667
Size() const4668 size_t Size() const {
4669 return table_.size();
4670 }
4671
WriteTo(std::vector<uint8_t> & bytes) const4672 void WriteTo(std::vector<uint8_t>& bytes) const {
4673 for (const std::string& str : table_) {
4674 const char* s = str.c_str();
4675 size_t s_len = CountModifiedUtf8Chars(s);
4676 std::unique_ptr<uint16_t> s_utf16(new uint16_t[s_len]);
4677 ConvertModifiedUtf8ToUtf16(s_utf16.get(), s);
4678 JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len);
4679 }
4680 }
4681
4682 private:
4683 std::set<std::string> table_;
4684 DISALLOW_COPY_AND_ASSIGN(StringTable);
4685 };
4686
GetMethodSourceFile(mirror::ArtMethod * method)4687 static const char* GetMethodSourceFile(mirror::ArtMethod* method)
4688 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4689 DCHECK(method != nullptr);
4690 const char* source_file = method->GetDeclaringClassSourceFile();
4691 return (source_file != nullptr) ? source_file : "";
4692 }
4693
4694 /*
4695 * The data we send to DDMS contains everything we have recorded.
4696 *
4697 * Message header (all values big-endian):
4698 * (1b) message header len (to allow future expansion); includes itself
4699 * (1b) entry header len
4700 * (1b) stack frame len
4701 * (2b) number of entries
4702 * (4b) offset to string table from start of message
4703 * (2b) number of class name strings
4704 * (2b) number of method name strings
4705 * (2b) number of source file name strings
4706 * For each entry:
4707 * (4b) total allocation size
4708 * (2b) thread id
4709 * (2b) allocated object's class name index
4710 * (1b) stack depth
4711 * For each stack frame:
4712 * (2b) method's class name
4713 * (2b) method name
4714 * (2b) method source file
4715 * (2b) line number, clipped to 32767; -2 if native; -1 if no source
4716 * (xb) class name strings
4717 * (xb) method name strings
4718 * (xb) source file strings
4719 *
4720 * As with other DDM traffic, strings are sent as a 4-byte length
4721 * followed by UTF-16 data.
4722 *
4723 * We send up 16-bit unsigned indexes into string tables. In theory there
4724 * can be (kMaxAllocRecordStackDepth * alloc_record_max_) unique strings in
4725 * each table, but in practice there should be far fewer.
4726 *
4727 * The chief reason for using a string table here is to keep the size of
4728 * the DDMS message to a minimum. This is partly to make the protocol
4729 * efficient, but also because we have to form the whole thing up all at
4730 * once in a memory buffer.
4731 *
4732 * We use separate string tables for class names, method names, and source
4733 * files to keep the indexes small. There will generally be no overlap
4734 * between the contents of these tables.
4735 */
GetRecentAllocations()4736 jbyteArray Dbg::GetRecentAllocations() {
4737 if (false) {
4738 DumpRecentAllocations();
4739 }
4740
4741 Thread* self = Thread::Current();
4742 std::vector<uint8_t> bytes;
4743 {
4744 MutexLock mu(self, *Locks::alloc_tracker_lock_);
4745 //
4746 // Part 1: generate string tables.
4747 //
4748 StringTable class_names;
4749 StringTable method_names;
4750 StringTable filenames;
4751
4752 const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
4753 uint16_t count = capped_count;
4754 size_t idx = HeadIndex();
4755 while (count--) {
4756 AllocRecord* record = &recent_allocation_records_[idx];
4757 std::string temp;
4758 class_names.Add(record->Type()->GetDescriptor(&temp));
4759 for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) {
4760 mirror::ArtMethod* m = record->StackElement(i)->Method();
4761 if (m != NULL) {
4762 class_names.Add(m->GetDeclaringClassDescriptor());
4763 method_names.Add(m->GetName());
4764 filenames.Add(GetMethodSourceFile(m));
4765 }
4766 }
4767
4768 idx = (idx + 1) & (alloc_record_max_ - 1);
4769 }
4770
4771 LOG(INFO) << "allocation records: " << capped_count;
4772
4773 //
4774 // Part 2: Generate the output and store it in the buffer.
4775 //
4776
4777 // (1b) message header len (to allow future expansion); includes itself
4778 // (1b) entry header len
4779 // (1b) stack frame len
4780 const int kMessageHeaderLen = 15;
4781 const int kEntryHeaderLen = 9;
4782 const int kStackFrameLen = 8;
4783 JDWP::Append1BE(bytes, kMessageHeaderLen);
4784 JDWP::Append1BE(bytes, kEntryHeaderLen);
4785 JDWP::Append1BE(bytes, kStackFrameLen);
4786
4787 // (2b) number of entries
4788 // (4b) offset to string table from start of message
4789 // (2b) number of class name strings
4790 // (2b) number of method name strings
4791 // (2b) number of source file name strings
4792 JDWP::Append2BE(bytes, capped_count);
4793 size_t string_table_offset = bytes.size();
4794 JDWP::Append4BE(bytes, 0); // We'll patch this later...
4795 JDWP::Append2BE(bytes, class_names.Size());
4796 JDWP::Append2BE(bytes, method_names.Size());
4797 JDWP::Append2BE(bytes, filenames.Size());
4798
4799 idx = HeadIndex();
4800 std::string temp;
4801 for (count = capped_count; count != 0; --count) {
4802 // For each entry:
4803 // (4b) total allocation size
4804 // (2b) thread id
4805 // (2b) allocated object's class name index
4806 // (1b) stack depth
4807 AllocRecord* record = &recent_allocation_records_[idx];
4808 size_t stack_depth = record->GetDepth();
4809 size_t allocated_object_class_name_index =
4810 class_names.IndexOf(record->Type()->GetDescriptor(&temp));
4811 JDWP::Append4BE(bytes, record->ByteCount());
4812 JDWP::Append2BE(bytes, record->ThinLockId());
4813 JDWP::Append2BE(bytes, allocated_object_class_name_index);
4814 JDWP::Append1BE(bytes, stack_depth);
4815
4816 for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) {
4817 // For each stack frame:
4818 // (2b) method's class name
4819 // (2b) method name
4820 // (2b) method source file
4821 // (2b) line number, clipped to 32767; -2 if native; -1 if no source
4822 mirror::ArtMethod* m = record->StackElement(stack_frame)->Method();
4823 size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor());
4824 size_t method_name_index = method_names.IndexOf(m->GetName());
4825 size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m));
4826 JDWP::Append2BE(bytes, class_name_index);
4827 JDWP::Append2BE(bytes, method_name_index);
4828 JDWP::Append2BE(bytes, file_name_index);
4829 JDWP::Append2BE(bytes, record->StackElement(stack_frame)->LineNumber());
4830 }
4831 idx = (idx + 1) & (alloc_record_max_ - 1);
4832 }
4833
4834 // (xb) class name strings
4835 // (xb) method name strings
4836 // (xb) source file strings
4837 JDWP::Set4BE(&bytes[string_table_offset], bytes.size());
4838 class_names.WriteTo(bytes);
4839 method_names.WriteTo(bytes);
4840 filenames.WriteTo(bytes);
4841 }
4842 JNIEnv* env = self->GetJniEnv();
4843 jbyteArray result = env->NewByteArray(bytes.size());
4844 if (result != NULL) {
4845 env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
4846 }
4847 return result;
4848 }
4849
Method() const4850 mirror::ArtMethod* DeoptimizationRequest::Method() const {
4851 ScopedObjectAccessUnchecked soa(Thread::Current());
4852 return soa.DecodeMethod(method_);
4853 }
4854
SetMethod(mirror::ArtMethod * m)4855 void DeoptimizationRequest::SetMethod(mirror::ArtMethod* m) {
4856 ScopedObjectAccessUnchecked soa(Thread::Current());
4857 method_ = soa.EncodeMethod(m);
4858 }
4859
4860 } // namespace art
4861