/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger.h"

#include <sys/uio.h>

#include <set>

#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/time_utils.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "dex_instruction.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/allocation_record.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "handle_scope.h"
#include "jdwp/jdwp_priv.h"
#include "jdwp/object_registry.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
#include "reflection.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedPrimitiveArray.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
#include "utf.h"
#include "well_known_classes.h"

namespace art {

// The key identifying the debugger to update instrumentation.
static constexpr const char* kDbgInstrumentationKey = "Debugger";

// Limit alloc_record_count to the 2BE value (64k-1) that is the limit of the current protocol.
static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
  const size_t cap = 0xffff;
  if (alloc_record_count > cap) {
    return cap;
  }
  return alloc_record_count;
}
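
// For example (values chosen for illustration): CappedAllocRecordCount(70000) returns
// 0xffff (65535), while CappedAllocRecordCount(42) returns 42, so the count always fits
// in the 2-byte big-endian field used by the protocol.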

// Takes a method and returns a 'canonical' one if the method is default (and therefore potentially
// copied from some other class). This ensures that the debugger does not get confused as to which
// method we are in.
static ArtMethod* GetCanonicalMethod(ArtMethod* m)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  if (LIKELY(!m->IsDefault())) {
    return m;
  } else {
    mirror::Class* declaring_class = m->GetDeclaringClass();
    return declaring_class->FindDeclaredVirtualMethod(declaring_class->GetDexCache(),
                                                      m->GetDexMethodIndex(),
                                                      sizeof(void*));
  }
}
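
// Illustrative example (hypothetical types): if interface Iface declares a default method
// foo() that gets copied into implementing classes, every copy canonicalizes to a single
// ArtMethod, so a breakpoint set through any copy matches the others (see IsInMethod below).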

class Breakpoint : public ValueObject {
 public:
  Breakpoint(ArtMethod* method, uint32_t dex_pc, DeoptimizationRequest::Kind deoptimization_kind)
    : method_(GetCanonicalMethod(method)),
      dex_pc_(dex_pc),
      deoptimization_kind_(deoptimization_kind) {
    CHECK(deoptimization_kind_ == DeoptimizationRequest::kNothing ||
          deoptimization_kind_ == DeoptimizationRequest::kSelectiveDeoptimization ||
          deoptimization_kind_ == DeoptimizationRequest::kFullDeoptimization);
  }

  Breakpoint(const Breakpoint& other) SHARED_REQUIRES(Locks::mutator_lock_)
    : method_(other.method_),
      dex_pc_(other.dex_pc_),
      deoptimization_kind_(other.deoptimization_kind_) {}

  // Method() is called from root visiting, do not use ScopedObjectAccess here or it can cause
  // GC to deadlock if another thread tries to call SuspendAll while the GC is in a runnable state.
  ArtMethod* Method() const {
    return method_;
  }

  uint32_t DexPc() const {
    return dex_pc_;
  }

  DeoptimizationRequest::Kind GetDeoptimizationKind() const {
    return deoptimization_kind_;
  }

  // Returns true if the method of this breakpoint and the passed in method should be considered the
  // same. That is, they are either the same method or they are copied from the same method.
  bool IsInMethod(ArtMethod* m) const SHARED_REQUIRES(Locks::mutator_lock_) {
    return method_ == GetCanonicalMethod(m);
  }

 private:
  // The location of this breakpoint.
  ArtMethod* method_;
  uint32_t dex_pc_;

  // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
  DeoptimizationRequest::Kind deoptimization_kind_;
};

static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
  return os;
}
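
// With the format above, a breakpoint at dex pc 0x1a in a (hypothetical) method
// void Foo.bar() would print as: Breakpoint[void Foo.bar() @0x1a].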

class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
 public:
  DebugInstrumentationListener() {}
  virtual ~DebugInstrumentationListener() {}

  void MethodEntered(Thread* thread, mirror::Object* this_object, ArtMethod* method,
                     uint32_t dex_pc)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: posting location events is a suspension point and native method entry stubs aren't.
      return;
    }
    if (IsListeningToDexPcMoved()) {
      // We also listen to the kDexPcMoved instrumentation event, so we know DexPcMoved is going
      // to be called right after us. To avoid sending JDWP events twice for this location, we
      // report the event in DexPcMoved. However, we must remember this is a method entry so we
      // send the METHOD_ENTRY event. And we can also group it with other events for this location
      // like BREAKPOINT or SINGLE_STEP (or even METHOD_EXIT if this is a RETURN instruction).
      thread->SetDebugMethodEntry();
    } else if (IsListeningToMethodExit() && IsReturn(method, dex_pc)) {
      // We also listen to the kMethodExited instrumentation event and the current instruction is
      // a RETURN, so we know MethodExited is going to be called right after us. To avoid sending
      // JDWP events twice for this location, we report the event(s) in MethodExited. However, we
      // must remember this is a method entry so we send the METHOD_ENTRY event. And we can also
      // group it with other events for this location like BREAKPOINT or SINGLE_STEP.
      thread->SetDebugMethodEntry();
    } else {
      Dbg::UpdateDebugger(thread, this_object, method, 0, Dbg::kMethodEntry, nullptr);
    }
  }

  void MethodExited(Thread* thread, mirror::Object* this_object, ArtMethod* method,
                    uint32_t dex_pc, const JValue& return_value)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: posting location events is a suspension point and native method entry stubs aren't.
      return;
    }
    uint32_t events = Dbg::kMethodExit;
    if (thread->IsDebugMethodEntry()) {
      // It is also the method entry.
      DCHECK(IsReturn(method, dex_pc));
      events |= Dbg::kMethodEntry;
      thread->ClearDebugMethodEntry();
    }
    Dbg::UpdateDebugger(thread, this_object, method, dex_pc, events, &return_value);
  }

  void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED,
                    ArtMethod* method, uint32_t dex_pc)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    // We're not registered to listen to this kind of event, so complain.
    LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
               << " " << dex_pc;
  }

  void DexPcMoved(Thread* thread, mirror::Object* this_object, ArtMethod* method,
                  uint32_t new_dex_pc)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) {
      // We also listen to the kMethodExited instrumentation event and the current instruction is
      // a RETURN, so we know MethodExited is going to be called right after us. Like in
      // MethodEntered, we delegate event reporting to MethodExited.
      // Besides, if this RETURN instruction is the only one in the method, we can send multiple
      // JDWP events in the same packet: METHOD_ENTRY, METHOD_EXIT, BREAKPOINT and/or SINGLE_STEP.
      // Therefore, we must not clear the debug method entry flag here.
    } else {
      uint32_t events = 0;
      if (thread->IsDebugMethodEntry()) {
        // It is also the method entry.
        events = Dbg::kMethodEntry;
        thread->ClearDebugMethodEntry();
      }
      Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc, events, nullptr);
    }
  }

  void FieldRead(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
                 ArtMethod* method, uint32_t dex_pc, ArtField* field)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
  }

  void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
                    ArtMethod* method, uint32_t dex_pc, ArtField* field,
                    const JValue& field_value)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
  }

  void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, mirror::Throwable* exception_object)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    Dbg::PostException(exception_object);
  }

  // We only care about branches in the Jit.
  void Branch(Thread* /*thread*/, ArtMethod* method, uint32_t dex_pc, int32_t dex_pc_offset)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    LOG(ERROR) << "Unexpected branch event in debugger " << PrettyMethod(method)
               << " " << dex_pc << ", " << dex_pc_offset;
  }

  // We only care about invokes in the Jit.
  void InvokeVirtualOrInterface(Thread* thread ATTRIBUTE_UNUSED,
                                mirror::Object*,
                                ArtMethod* method,
                                uint32_t dex_pc,
                                ArtMethod*)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    LOG(ERROR) << "Unexpected invoke event in debugger " << PrettyMethod(method)
               << " " << dex_pc;
  }

 private:
  static bool IsReturn(ArtMethod* method, uint32_t dex_pc)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    const DexFile::CodeItem* code_item = method->GetCodeItem();
    const Instruction* instruction = Instruction::At(&code_item->insns_[dex_pc]);
    return instruction->IsReturn();
  }

  static bool IsListeningToDexPcMoved() SHARED_REQUIRES(Locks::mutator_lock_) {
    return IsListeningTo(instrumentation::Instrumentation::kDexPcMoved);
  }

  static bool IsListeningToMethodExit() SHARED_REQUIRES(Locks::mutator_lock_) {
    return IsListeningTo(instrumentation::Instrumentation::kMethodExited);
  }

  static bool IsListeningTo(instrumentation::Instrumentation::InstrumentationEvent event)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    return (Dbg::GetInstrumentationEvents() & event) != 0;
  }

  DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
} gDebugInstrumentationListener;
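
// To summarize the delegation above: when several instrumentation callbacks fire for the same
// location, the listener reports all JDWP events from the last callback in the chain
// (MethodEntered -> DexPcMoved -> MethodExited) so they can be grouped into a single packet;
// the per-thread "debug method entry" flag carries the pending METHOD_ENTRY across callbacks.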

// JDWP is allowed unless the Zygote forbids it.
static bool gJdwpAllowed = true;

// Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line?
static bool gJdwpConfigured = false;

// JDWP options for debugging. Only valid if IsJdwpConfigured() is true.
static JDWP::JdwpOptions gJdwpOptions;

// Runtime JDWP state.
static JDWP::JdwpState* gJdwpState = nullptr;
static bool gDebuggerConnected;  // debugger or DDMS is connected.

static bool gDdmThreadNotification = false;

// DDMS GC-related settings.
static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmHpsgWhat;
static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmNhsgWhat;

bool Dbg::gDebuggerActive = false;
bool Dbg::gDisposed = false;
ObjectRegistry* Dbg::gRegistry = nullptr;

// Deoptimization support.
std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
size_t Dbg::full_deoptimization_event_count_ = 0;

// Instrumentation event reference counters.
size_t Dbg::dex_pc_change_event_ref_count_ = 0;
size_t Dbg::method_enter_event_ref_count_ = 0;
size_t Dbg::method_exit_event_ref_count_ = 0;
size_t Dbg::field_read_event_ref_count_ = 0;
size_t Dbg::field_write_event_ref_count_ = 0;
size_t Dbg::exception_catch_event_ref_count_ = 0;
uint32_t Dbg::instrumentation_events_ = 0;

// Breakpoints.
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);

void DebugInvokeReq::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
  receiver.VisitRootIfNonNull(visitor, root_info);  // null for static method call.
  klass.VisitRoot(visitor, root_info);
}

void SingleStepControl::AddDexPc(uint32_t dex_pc) {
  dex_pcs_.insert(dex_pc);
}

// Returns true if the single-step's set of dex pcs contains the given dex pc.
bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const {
  return dex_pcs_.find(dex_pc) != dex_pcs_.end();
}

static bool IsBreakpoint(ArtMethod* m, uint32_t dex_pc)
    REQUIRES(!Locks::breakpoint_lock_)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
    if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].IsInMethod(m)) {
      VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
      return true;
    }
  }
  return false;
}

static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
    REQUIRES(!Locks::thread_suspend_count_lock_) {
  MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
  // A thread may be suspended for GC; in this code, we really want to know whether
  // there's a debugger suspension active.
  return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0;
}

static mirror::Array* DecodeNonNullArray(JDWP::RefTypeId id, JDWP::JdwpError* error)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
  if (o == nullptr) {
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }
  if (!o->IsArrayInstance()) {
    *error = JDWP::ERR_INVALID_ARRAY;
    return nullptr;
  }
  *error = JDWP::ERR_NONE;
  return o->AsArray();
}

static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError* error)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
  if (o == nullptr) {
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }
  if (!o->IsClass()) {
    *error = JDWP::ERR_INVALID_CLASS;
    return nullptr;
  }
  *error = JDWP::ERR_NONE;
  return o->AsClass();
}

static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id,
                            JDWP::JdwpError* error)
    SHARED_REQUIRES(Locks::mutator_lock_)
    REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) {
  mirror::Object* thread_peer = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id, error);
  if (thread_peer == nullptr) {
    // This isn't even an object.
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }

  mirror::Class* java_lang_Thread = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
  if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) {
    // This isn't a thread.
    *error = JDWP::ERR_INVALID_THREAD;
    return nullptr;
  }

  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread = Thread::FromManagedThread(soa, thread_peer);
  // If thread is null then this is a java.lang.Thread without a Thread*. It must be an unstarted
  // or a zombie thread.
  *error = (thread == nullptr) ? JDWP::ERR_THREAD_NOT_ALIVE : JDWP::ERR_NONE;
  return thread;
}
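
// DecodeThread thus distinguishes three failure modes: the id doesn't name a live object
// (ERR_INVALID_OBJECT), the object isn't a java.lang.Thread (ERR_INVALID_THREAD), and the
// Thread peer has no native Thread* attached (ERR_THREAD_NOT_ALIVE).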

static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) {
  // JDWP deliberately uses the descriptor characters' ASCII values for its enum.
  // Note that by "basic" we mean that we don't get more specific than JT_OBJECT.
  return static_cast<JDWP::JdwpTag>(descriptor[0]);
}
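
// For example: "I" yields JT_INT ('I'), "[I" yields JT_ARRAY ('['), and a reference type
// descriptor such as "Ljava/lang/String;" yields JT_OBJECT ('L') rather than JT_STRING,
// which is what "basic" means here.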

static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  std::string temp;
  const char* descriptor = klass->GetDescriptor(&temp);
  return BasicTagFromDescriptor(descriptor);
}

static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  CHECK(c != nullptr);
  if (c->IsArrayClass()) {
    return JDWP::JT_ARRAY;
  }
  if (c->IsStringClass()) {
    return JDWP::JT_STRING;
  }
  if (c->IsClassClass()) {
    return JDWP::JT_CLASS_OBJECT;
  }
  {
    mirror::Class* thread_class = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
    if (thread_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD;
    }
  }
  {
    mirror::Class* thread_group_class =
        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
    if (thread_group_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD_GROUP;
    }
  }
  {
    mirror::Class* class_loader_class =
        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader);
    if (class_loader_class->IsAssignableFrom(c)) {
      return JDWP::JT_CLASS_LOADER;
    }
  }
  return JDWP::JT_OBJECT;
}

/*
 * Objects declared to hold Object might actually hold a more specific
 * type.  The debugger may take a special interest in these (e.g. it
 * wants to display the contents of Strings), so we want to return an
 * appropriate tag.
 *
 * Null objects are tagged JT_OBJECT.
 */
JDWP::JdwpTag Dbg::TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o) {
  return (o == nullptr) ? JDWP::JT_OBJECT : TagFromClass(soa, o->GetClass());
}

static bool IsPrimitiveTag(JDWP::JdwpTag tag) {
  switch (tag) {
  case JDWP::JT_BOOLEAN:
  case JDWP::JT_BYTE:
  case JDWP::JT_CHAR:
  case JDWP::JT_FLOAT:
  case JDWP::JT_DOUBLE:
  case JDWP::JT_INT:
  case JDWP::JT_LONG:
  case JDWP::JT_SHORT:
  case JDWP::JT_VOID:
    return true;
  default:
    return false;
  }
}

void Dbg::StartJdwp() {
  if (!gJdwpAllowed || !IsJdwpConfigured()) {
    // No JDWP for you!
    return;
  }

  CHECK(gRegistry == nullptr);
  gRegistry = new ObjectRegistry;

  // Init JDWP if the debugger is enabled. This may connect out to a
  // debugger, passively listen for a debugger, or block waiting for a
  // debugger.
  gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions);
  if (gJdwpState == nullptr) {
    // We probably failed because some other process has the port already, which means that
    // if we don't abort the user is likely to think they're talking to us when they're actually
    // talking to that other process.
    LOG(FATAL) << "Debugger thread failed to initialize";
  }

  // If a debugger has already attached, send the "welcome" message.
  // This may cause us to suspend all threads.
  if (gJdwpState->IsActive()) {
    ScopedObjectAccess soa(Thread::Current());
    gJdwpState->PostVMStart();
  }
}

void Dbg::StopJdwp() {
  // Post VM_DEATH event before the JDWP connection is closed (either by the JDWP thread or the
  // destruction of gJdwpState).
  if (gJdwpState != nullptr && gJdwpState->IsActive()) {
    gJdwpState->PostVMDeath();
  }
  // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
  Dispose();
  delete gJdwpState;
  gJdwpState = nullptr;
  delete gRegistry;
  gRegistry = nullptr;
}

void Dbg::GcDidFinish() {
  if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Sending heap info to DDM";
    DdmSendHeapInfo(gDdmHpifWhen);
  }
  if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping heap to DDM";
    DdmSendHeapSegments(false);
  }
  if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping native heap to DDM";
    DdmSendHeapSegments(true);
  }
}

void Dbg::SetJdwpAllowed(bool allowed) {
  gJdwpAllowed = allowed;
}

DebugInvokeReq* Dbg::GetInvokeReq() {
  return Thread::Current()->GetInvokeReq();
}

Thread* Dbg::GetDebugThread() {
  return (gJdwpState != nullptr) ? gJdwpState->GetDebugThread() : nullptr;
}

void Dbg::ClearWaitForEventThread() {
  gJdwpState->ReleaseJdwpTokenForEvent();
}

void Dbg::Connected() {
  CHECK(!gDebuggerConnected);
  VLOG(jdwp) << "JDWP has attached";
  gDebuggerConnected = true;
  gDisposed = false;
}

bool Dbg::RequiresDeoptimization() {
  // We don't need deoptimization if everything runs with the interpreter after
  // enabling -Xint mode.
  return !Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly();
}

// Used to patch boot image method entry point to interpreter bridge.
class UpdateEntryPointsClassVisitor : public ClassVisitor {
 public:
  explicit UpdateEntryPointsClassVisitor(instrumentation::Instrumentation* instrumentation)
      : instrumentation_(instrumentation) {}

  bool operator()(mirror::Class* klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
    auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
    for (auto& m : klass->GetMethods(pointer_size)) {
      const void* code = m.GetEntryPointFromQuickCompiledCode();
      if (Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
          !m.IsNative() &&
          !m.IsProxyMethod()) {
        instrumentation_->UpdateMethodsCodeFromDebugger(&m, GetQuickToInterpreterBridge());
      }
    }
    return true;
  }

 private:
  instrumentation::Instrumentation* const instrumentation_;
};

void Dbg::GoActive() {
  // Enable all debugging features, including scans for breakpoints.
  // This is a no-op if we're already active.
  // Only called from the JDWP handler thread.
  if (IsDebuggerActive()) {
    return;
  }

  Thread* const self = Thread::Current();
  {
    // TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected?
    ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
    CHECK_EQ(gBreakpoints.size(), 0U);
  }

  {
    MutexLock mu(self, *Locks::deoptimization_lock_);
    CHECK_EQ(deoptimization_requests_.size(), 0U);
    CHECK_EQ(full_deoptimization_event_count_, 0U);
    CHECK_EQ(dex_pc_change_event_ref_count_, 0U);
    CHECK_EQ(method_enter_event_ref_count_, 0U);
    CHECK_EQ(method_exit_event_ref_count_, 0U);
    CHECK_EQ(field_read_event_ref_count_, 0U);
    CHECK_EQ(field_write_event_ref_count_, 0U);
    CHECK_EQ(exception_catch_event_ref_count_, 0U);
  }

  Runtime* runtime = Runtime::Current();
  // Since boot image code may be AOT compiled as not debuggable, we need to patch
  // entry points of methods in boot image to interpreter bridge.
  // However, the performance cost of this is non-negligible during native-debugging due to the
  // forced JIT, so we keep the AOT code in that case in exchange for limited native debugging.
  if (!runtime->GetInstrumentation()->IsForcedInterpretOnly() && !runtime->IsNativeDebuggable()) {
    ScopedObjectAccess soa(self);
    UpdateEntryPointsClassVisitor visitor(runtime->GetInstrumentation());
    runtime->GetClassLinker()->VisitClasses(&visitor);
  }

  ScopedSuspendAll ssa(__FUNCTION__);
  if (RequiresDeoptimization()) {
    runtime->GetInstrumentation()->EnableDeoptimization();
  }
  instrumentation_events_ = 0;
  gDebuggerActive = true;
  LOG(INFO) << "Debugger is active";
}

void Dbg::Disconnected() {
  CHECK(gDebuggerConnected);

  LOG(INFO) << "Debugger is no longer active";

  // Suspend all threads and exclusively acquire the mutator lock. Set the state of the thread
  // to kRunnable to avoid scoped object access transitions. Remove the debugger as a listener
  // and clear the object registry.
  Runtime* runtime = Runtime::Current();
  Thread* self = Thread::Current();
  {
    // Required for DisableDeoptimization.
    gc::ScopedGCCriticalSection gcs(self,
                                    gc::kGcCauseInstrumentation,
                                    gc::kCollectorTypeInstrumentation);
    ScopedSuspendAll ssa(__FUNCTION__);
    ThreadState old_state = self->SetStateUnsafe(kRunnable);
    // Debugger may not be active at this point.
    if (IsDebuggerActive()) {
      {
        // Since we're going to disable deoptimization, we clear the deoptimization requests queue.
        // This prevents us from having any pending deoptimization request when the debugger
        // attaches to us again while no event has been requested yet.
        MutexLock mu(self, *Locks::deoptimization_lock_);
        deoptimization_requests_.clear();
        full_deoptimization_event_count_ = 0U;
      }
      if (instrumentation_events_ != 0) {
        runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
                                                      instrumentation_events_);
        instrumentation_events_ = 0;
      }
      if (RequiresDeoptimization()) {
        runtime->GetInstrumentation()->DisableDeoptimization(kDbgInstrumentationKey);
      }
      gDebuggerActive = false;
    }
    CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
  }

  {
    ScopedObjectAccess soa(self);
    gRegistry->Clear();
  }

  gDebuggerConnected = false;
}

void Dbg::ConfigureJdwp(const JDWP::JdwpOptions& jdwp_options) {
  CHECK_NE(jdwp_options.transport, JDWP::kJdwpTransportUnknown);
  gJdwpOptions = jdwp_options;
  gJdwpConfigured = true;
}

bool Dbg::IsJdwpConfigured() {
  return gJdwpConfigured;
}

int64_t Dbg::LastDebuggerActivity() {
  return gJdwpState->LastDebuggerActivity();
}

void Dbg::UndoDebuggerSuspensions() {
  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
}

std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id, &error);
  if (o == nullptr) {
    if (error == JDWP::ERR_NONE) {
      return "null";
    } else {
      return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
    }
  }
  if (!o->IsClass()) {
    return StringPrintf("non-class %p", o);  // This is only used for debugging output anyway.
  }
  return GetClassName(o->AsClass());
}

std::string Dbg::GetClassName(mirror::Class* klass) {
  if (klass == nullptr) {
    return "null";
  }
  std::string temp;
  return DescriptorToName(klass->GetDescriptor(&temp));
}

JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId* class_object_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, &status);
  if (c == nullptr) {
    *class_object_id = 0;
    return status;
  }
  *class_object_id = gRegistry->Add(c);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId* superclass_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, &status);
  if (c == nullptr) {
    *superclass_id = 0;
    return status;
  }
  if (c->IsInterface()) {
    // http://code.google.com/p/android/issues/detail?id=20856
    *superclass_id = 0;
  } else {
    *superclass_id = gRegistry->Add(c->GetSuperClass());
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(id, &error);
  if (c == nullptr) {
    return error;
  }
  expandBufAddObjectId(pReply, gRegistry->Add(c->GetClassLoader()));
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(id, &error);
  if (c == nullptr) {
    return error;
  }

  uint32_t access_flags = c->GetAccessFlags() & kAccJavaFlagsMask;

  // Set ACC_SUPER. Dex files don't contain this flag, and only classes (not interfaces) are
  // supposed to have it set.
  // Class.getModifiers doesn't return it, but JDWP does, so we set it here.
  if ((access_flags & kAccInterface) == 0) {
    access_flags |= kAccSuper;
  }

  expandBufAdd4BE(pReply, access_flags);

  return JDWP::ERR_NONE;
}
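
// Worked example: for a plain public class, GetAccessFlags() masked by kAccJavaFlagsMask
// yields 0x0001 (ACC_PUBLIC); since it is not an interface we OR in kAccSuper (0x0020),
// so the debugger sees modifiers 0x0021.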

JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  // Ensure all threads are suspended while we read objects' lock words.
  Thread* self = Thread::Current();
  CHECK_EQ(self->GetState(), kRunnable);

  MonitorInfo monitor_info;
  {
    ScopedThreadSuspension sts(self, kSuspended);
    ScopedSuspendAll ssa(__FUNCTION__);
    monitor_info = MonitorInfo(o);
  }
  if (monitor_info.owner_ != nullptr) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeer()));
  } else {
    expandBufAddObjectId(reply, gRegistry->Add(nullptr));
  }
  expandBufAdd4BE(reply, monitor_info.entry_count_);
  expandBufAdd4BE(reply, monitor_info.waiters_.size());
  for (size_t i = 0; i < monitor_info.waiters_.size(); ++i) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.waiters_[i]->GetPeer()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
                                      std::vector<JDWP::ObjectId>* monitors,
                                      std::vector<uint32_t>* stack_depths) {
  struct OwnedMonitorVisitor : public StackVisitor {
    OwnedMonitorVisitor(Thread* thread, Context* context,
                        std::vector<JDWP::ObjectId>* monitor_vector,
                        std::vector<uint32_t>* stack_depth_vector)
        SHARED_REQUIRES(Locks::mutator_lock_)
      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        current_stack_depth(0),
        monitors(monitor_vector),
        stack_depths(stack_depth_vector) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (!GetMethod()->IsRuntimeMethod()) {
        Monitor::VisitLocks(this, AppendOwnedMonitors, this);
        ++current_stack_depth;
      }
      return true;
    }

    static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg)
        SHARED_REQUIRES(Locks::mutator_lock_) {
      OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
      visitor->monitors->push_back(gRegistry->Add(owned_monitor));
      visitor->stack_depths->push_back(visitor->current_stack_depth);
    }

    size_t current_stack_depth;
    std::vector<JDWP::ObjectId>* const monitors;
    std::vector<uint32_t>* const stack_depths;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (thread == nullptr) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  std::unique_ptr<Context> context(Context::Create());
  OwnedMonitorVisitor visitor(thread, context.get(), monitors, stack_depths);
  visitor.WalkStack();
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id,
                                         JDWP::ObjectId* contended_monitor) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  *contended_monitor = 0;
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (thread == nullptr) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  mirror::Object* contended_monitor_obj = Monitor::GetContendedMonitor(thread);
  // Add() requires the thread_list_lock_ not held to avoid the lock
  // level violation.
  *contended_monitor = gRegistry->Add(contended_monitor_obj);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
                                       std::vector<uint64_t>* counts) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  std::vector<mirror::Class*> classes;
  counts->clear();
  for (size_t i = 0; i < class_ids.size(); ++i) {
    JDWP::JdwpError error;
    mirror::Class* c = DecodeClass(class_ids[i], &error);
    if (c == nullptr) {
      return error;
    }
    classes.push_back(c);
    counts->push_back(0);
  }
  heap->CountInstances(classes, false, &(*counts)[0]);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count,
                                  std::vector<JDWP::ObjectId>* instances) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  // We only want reachable instances, so do a GC.
  heap->CollectGarbage(false);
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  std::vector<mirror::Object*> raw_instances;
  Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    instances->push_back(gRegistry->Add(raw_instances[i]));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
                                         std::vector<JDWP::ObjectId>* referring_objects) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  std::vector<mirror::Object*> raw_instances;
  heap->GetReferringObjects(o, max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    referring_objects->push_back(gRegistry->Add(raw_instances[i]));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->DisableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  // Unlike DisableCollection, the JDWP spec does not state that an invalid object causes an
  // error. The RI also ignores these cases and never returns an error. However, it's not obvious
  // why this command should behave differently from the DisableCollection and IsCollected
  // commands. So let's be more strict and return an error if this happens.
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->EnableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool* is_collected) {
  *is_collected = true;
  if (object_id == 0) {
    // Null object id is invalid.
    return JDWP::ERR_INVALID_OBJECT;
  }
  // The JDWP spec states an INVALID_OBJECT error is returned if the object ID is not valid.
  // However the RI seems to ignore this and assumes the object has been collected.
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o != nullptr) {
    *is_collected = gRegistry->IsCollected(object_id);
  }
  return JDWP::ERR_NONE;
}

void Dbg::DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count) {
  gRegistry->DisposeObject(object_id, reference_count);
}

JDWP::JdwpTypeTag Dbg::GetTypeTag(mirror::Class* klass) {
  DCHECK(klass != nullptr);
  if (klass->IsArrayClass()) {
    return JDWP::TT_ARRAY;
  } else if (klass->IsInterface()) {
    return JDWP::TT_INTERFACE;
  } else {
    return JDWP::TT_CLASS;
  }
}

JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }

  JDWP::JdwpTypeTag type_tag = GetTypeTag(c);
  expandBufAdd1(pReply, type_tag);
  expandBufAddRefTypeId(pReply, class_id);
  return JDWP::ERR_NONE;
}

// Visit all loaded reference types (i.e. all classes except the primitive types) and
// add their RefTypeIds to the given vector.
class ClassListCreator : public ClassVisitor {
 public:
  explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes_(classes) {}

  bool operator()(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!c->IsPrimitive()) {
      classes_->push_back(Dbg::GetObjectRegistry()->AddRefType(c));
    }
    return true;
  }

 private:
  std::vector<JDWP::RefTypeId>* const classes_;
};

void Dbg::GetClassList(std::vector<JDWP::RefTypeId>* classes) {
  ClassListCreator clc(classes);
  Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(&clc);
}

JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
                                  uint32_t* pStatus, std::string* pDescriptor) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }

  if (c->IsArrayClass()) {
    *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
    *pTypeTag = JDWP::TT_ARRAY;
  } else {
    if (c->IsErroneous()) {
      *pStatus = JDWP::CS_ERROR;
    } else {
      *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED;
    }
    *pTypeTag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
  }

  if (pDescriptor != nullptr) {
    std::string temp;
    *pDescriptor = c->GetDescriptor(&temp);
  }
  return JDWP::ERR_NONE;
}

void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>* ids) {
  std::vector<mirror::Class*> classes;
  Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
  ids->clear();
  for (size_t i = 0; i < classes.size(); ++i) {
    ids->push_back(gRegistry->Add(classes[i]));
  }
}

JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  JDWP::JdwpTypeTag type_tag = GetTypeTag(o->GetClass());
  JDWP::RefTypeId type_id = gRegistry->AddRefType(o->GetClass());

  expandBufAdd1(pReply, type_tag);
  expandBufAddRefTypeId(pReply, type_id);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signature) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  std::string temp;
  *signature = c->GetDescriptor(&temp);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string* result) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  const char* source_file = c->GetSourceFile();
  if (source_file == nullptr) {
    return JDWP::ERR_ABSENT_INFORMATION;
  }
  *result = source_file;
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t* tag) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (error != JDWP::ERR_NONE) {
    *tag = JDWP::JT_VOID;
    return error;
  }
  *tag = TagFromObject(soa, o);
  return JDWP::ERR_NONE;
}

size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) {
  switch (tag) {
  case JDWP::JT_VOID:
    return 0;
  case JDWP::JT_BYTE:
  case JDWP::JT_BOOLEAN:
    return 1;
  case JDWP::JT_CHAR:
  case JDWP::JT_SHORT:
    return 2;
  case JDWP::JT_FLOAT:
  case JDWP::JT_INT:
    return 4;
  case JDWP::JT_ARRAY:
  case JDWP::JT_OBJECT:
  case JDWP::JT_STRING:
  case JDWP::JT_THREAD:
  case JDWP::JT_THREAD_GROUP:
  case JDWP::JT_CLASS_LOADER:
  case JDWP::JT_CLASS_OBJECT:
    return sizeof(JDWP::ObjectId);
  case JDWP::JT_DOUBLE:
  case JDWP::JT_LONG:
    return 8;
  default:
    LOG(FATAL) << "Unknown tag " << tag;
    return -1;
  }
}

JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int32_t* length) {
  JDWP::JdwpError error;
  mirror::Array* a = DecodeNonNullArray(array_id, &error);
  if (a == nullptr) {
    return error;
  }
  *length = a->GetLength();
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Array* a = DecodeNonNullArray(array_id, &error);
  if (a == nullptr) {
    return error;
  }

  if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  JDWP::JdwpTag element_tag = BasicTagFromClass(a->GetClass()->GetComponentType());
  expandBufAdd1(pReply, element_tag);
  expandBufAdd4BE(pReply, count);

  if (IsPrimitiveTag(element_tag)) {
    size_t width = GetTagWidth(element_tag);
    uint8_t* dst = expandBufAddSpace(pReply, count * width);
    if (width == 8) {
      const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
    } else if (width == 4) {
      const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
    } else if (width == 2) {
      const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
    } else {
      const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t), 0));
      memcpy(dst, &src[offset * width], count * width);
    }
  } else {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      mirror::Object* element = oa->Get(offset + i);
      JDWP::JdwpTag specific_tag = (element != nullptr) ? TagFromObject(soa, element)
                                                        : element_tag;
      expandBufAdd1(pReply, specific_tag);
      expandBufAddObjectId(pReply, gRegistry->Add(element));
    }
  }

  return JDWP::ERR_NONE;
}
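
// The reply region produced above is laid out as: a one-byte element tag, a 4-byte
// big-endian count, then either raw big-endian primitive values or, for reference
// arrays, a (tag, object id) pair per element.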

template <typename T>
static void CopyArrayData(mirror::Array* a, JDWP::Request* src, int offset, int count)
    NO_THREAD_SAFETY_ANALYSIS {
  // TODO: fix when annotalysis correctly handles non-member functions.
  DCHECK(a->GetClass()->IsPrimitiveArray());

  T* dst = reinterpret_cast<T*>(a->GetRawData(sizeof(T), offset));
  for (int i = 0; i < count; ++i) {
    *dst++ = src->ReadValue(sizeof(T));
  }
}
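
// Used by SetArrayElements below: e.g. writing into a short[] goes through
// CopyArrayData<uint16_t>, which reads `count` two-byte values from the JDWP request
// and stores them starting at `offset` in the array's raw data.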

JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
                                      JDWP::Request* request) {
  JDWP::JdwpError error;
  mirror::Array* dst = DecodeNonNullArray(array_id, &error);
  if (dst == nullptr) {
    return error;
  }

  if (offset < 0 || count < 0 || offset > dst->GetLength() || dst->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  JDWP::JdwpTag element_tag = BasicTagFromClass(dst->GetClass()->GetComponentType());

  if (IsPrimitiveTag(element_tag)) {
    size_t width = GetTagWidth(element_tag);
    if (width == 8) {
      CopyArrayData<uint64_t>(dst, request, offset, count);
    } else if (width == 4) {
      CopyArrayData<uint32_t>(dst, request, offset, count);
    } else if (width == 2) {
      CopyArrayData<uint16_t>(dst, request, offset, count);
    } else {
      CopyArrayData<uint8_t>(dst, request, offset, count);
    }
  } else {
    mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      JDWP::ObjectId id = request->ReadObjectId();
      mirror::Object* o = gRegistry->Get<mirror::Object*>(id, &error);
      if (error != JDWP::ERR_NONE) {
        return error;
      }
      // Check if the object's type is compatible with the array's type.
      if (o != nullptr && !o->InstanceOf(oa->GetClass()->GetComponentType())) {
        return JDWP::ERR_TYPE_MISMATCH;
      }
      oa->Set<false>(offset + i, o);
    }
  }

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::CreateString(const std::string& str, JDWP::ObjectId* new_string_id) {
  Thread* self = Thread::Current();
  mirror::String* new_string = mirror::String::AllocFromModifiedUtf8(self, str.c_str());
  if (new_string == nullptr) {
    DCHECK(self->IsExceptionPending());
    self->ClearException();
    LOG(ERROR) << "Could not allocate string";
    *new_string_id = 0;
    return JDWP::ERR_OUT_OF_MEMORY;
  }
  *new_string_id = gRegistry->Add(new_string);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_object_id) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    *new_object_id = 0;
    return error;
  }
  Thread* self = Thread::Current();
  mirror::Object* new_object;
  if (c->IsStringClass()) {
    // Special case for java.lang.String.
    gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
    mirror::SetStringCountVisitor visitor(0);
    new_object = mirror::String::Alloc<true>(self, 0, allocator_type, visitor);
  } else {
    new_object = c->AllocObject(self);
  }
  if (new_object == nullptr) {
    DCHECK(self->IsExceptionPending());
    self->ClearException();
    LOG(ERROR) << "Could not allocate object of type " << PrettyDescriptor(c);
    *new_object_id = 0;
    return JDWP::ERR_OUT_OF_MEMORY;
  }
  *new_object_id = gRegistry->Add(new_object);
  return JDWP::ERR_NONE;
}

/*
 * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]".
 */
JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
                                       JDWP::ObjectId* new_array_id) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(array_class_id, &error);
  if (c == nullptr) {
    *new_array_id = 0;
    return error;
  }
  Thread* self = Thread::Current();
  gc::Heap* heap = Runtime::Current()->GetHeap();
  mirror::Array* new_array = mirror::Array::Alloc<true>(self, c, length,
                                                        c->GetComponentSizeShift(),
                                                        heap->GetCurrentAllocator());
  if (new_array == nullptr) {
    DCHECK(self->IsExceptionPending());
    self->ClearException();
    LOG(ERROR) << "Could not allocate array of type " << PrettyDescriptor(c);
    *new_array_id = 0;
    return JDWP::ERR_OUT_OF_MEMORY;
  }
  *new_array_id = gRegistry->Add(new_array);
  return JDWP::ERR_NONE;
}
1333 
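// JDWP field and method IDs are simply the runtime's ArtField*/ArtMethod*
// pointer values, so the conversions below are inverse casts. As an
// illustrative sanity check, FromFieldId(ToFieldId(f)) == f for any
// ArtField* f; methods additionally go through GetCanonicalMethod, so copied
// default methods all map to a single canonical ID.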
JDWP::FieldId Dbg::ToFieldId(const ArtField* f) {
  return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
}

static JDWP::MethodId ToMethodId(ArtMethod* m)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(GetCanonicalMethod(m)));
}

static ArtField* FromFieldId(JDWP::FieldId fid)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  return reinterpret_cast<ArtField*>(static_cast<uintptr_t>(fid));
}

static ArtMethod* FromMethodId(JDWP::MethodId mid)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(mid));
}

bool Dbg::MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread) {
  CHECK(event_thread != nullptr);
  JDWP::JdwpError error;
  mirror::Object* expected_thread_peer = gRegistry->Get<mirror::Object*>(
      expected_thread_id, &error);
  return expected_thread_peer == event_thread->GetPeer();
}

bool Dbg::MatchLocation(const JDWP::JdwpLocation& expected_location,
                        const JDWP::EventLocation& event_location) {
  if (expected_location.dex_pc != event_location.dex_pc) {
    return false;
  }
  ArtMethod* m = FromMethodId(expected_location.method_id);
  return m == event_location.method;
}

bool Dbg::MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id) {
  if (event_class == nullptr) {
    return false;
  }
  JDWP::JdwpError error;
  mirror::Class* expected_class = DecodeClass(class_id, &error);
  CHECK(expected_class != nullptr);
  return expected_class->IsAssignableFrom(event_class);
}

bool Dbg::MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
                     ArtField* event_field) {
  ArtField* expected_field = FromFieldId(expected_field_id);
  if (expected_field != event_field) {
    return false;
  }
  return Dbg::MatchType(event_field->GetDeclaringClass(), expected_type_id);
}

bool Dbg::MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance) {
  JDWP::JdwpError error;
  mirror::Object* modifier_instance = gRegistry->Get<mirror::Object*>(expected_instance_id, &error);
  return modifier_instance == event_instance;
}

void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc) {
  if (m == nullptr) {
    memset(location, 0, sizeof(*location));
  } else {
    mirror::Class* c = m->GetDeclaringClass();
    location->type_tag = GetTypeTag(c);
    location->class_id = gRegistry->AddRefType(c);
    location->method_id = ToMethodId(m);
    location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc;
  }
}

std::string Dbg::GetMethodName(JDWP::MethodId method_id) {
  ArtMethod* m = FromMethodId(method_id);
  if (m == nullptr) {
    return "null";
  }
  return m->GetInterfaceMethodIfProxy(sizeof(void*))->GetName();
}

std::string Dbg::GetFieldName(JDWP::FieldId field_id) {
  ArtField* f = FromFieldId(field_id);
  if (f == nullptr) {
    return "null";
  }
  return f->GetName();
}

/*
 * Augment the access flags for synthetic methods and fields by setting
 * the (as described by the spec) "0xf0000000 bit".  Also, strip out any
 * flags not specified by the Java programming language.
 */
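// For example, a synthetic private field with flags
// (kAccPrivate | kAccSynthetic) == 0x00001002 is reported as 0xf0001002.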
static uint32_t MangleAccessFlags(uint32_t accessFlags) {
  accessFlags &= kAccJavaFlagsMask;
  if ((accessFlags & kAccSynthetic) != 0) {
    accessFlags |= 0xf0000000;
  }
  return accessFlags;
}

/*
 * Circularly shifts registers so that arguments come first. Debuggers
 * expect slots to begin with arguments, but dex code places them at
 * the end.
 */
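// Worked example: a method with registers_size == 5 and ins_size == 2 keeps
// locals in v0..v2 and arguments in v3..v4. MangleSlot puts the arguments
// first (v3 -> 0, v4 -> 1) and the locals after them (v0 -> 2, v1 -> 3,
// v2 -> 4).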
static uint16_t MangleSlot(uint16_t slot, ArtMethod* m)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    // We should not get here for a method without code (native, proxy or abstract). Log it and
    // return the slot as is since all registers are arguments.
    LOG(WARNING) << "Trying to mangle slot for method without code " << PrettyMethod(m);
    return slot;
  }
  uint16_t ins_size = code_item->ins_size_;
  uint16_t locals_size = code_item->registers_size_ - ins_size;
  if (slot >= locals_size) {
    return slot - locals_size;
  } else {
    return slot + ins_size;
  }
}

/*
 * Circularly shifts registers so that arguments come last. Reverts
 * slots to dex style argument placement.
 */
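// Continuing the example above (registers_size == 5, ins_size == 2), this
// inverts the mapping: 0 -> v3, 1 -> v4, 2 -> v0, 3 -> v1, 4 -> v2, so
// DemangleSlot(MangleSlot(v, m), m) == v for every valid slot.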
static uint16_t DemangleSlot(uint16_t slot, ArtMethod* m, JDWP::JdwpError* error)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    // We should not get here for a method without code (native, proxy or abstract). Log it and
    // return the slot as is since all registers are arguments.
    LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m);
    uint16_t vreg_count = ArtMethod::NumArgRegisters(m->GetShorty());
    if (slot < vreg_count) {
      *error = JDWP::ERR_NONE;
      return slot;
    }
  } else {
    if (slot < code_item->registers_size_) {
      uint16_t ins_size = code_item->ins_size_;
      uint16_t locals_size = code_item->registers_size_ - ins_size;
      *error = JDWP::ERR_NONE;
      return (slot < ins_size) ? slot + locals_size : slot - ins_size;
    }
  }

  // Slot is invalid in the method.
  LOG(ERROR) << "Invalid local slot " << slot << " for method " << PrettyMethod(m);
  *error = JDWP::ERR_INVALID_SLOT;
  return DexFile::kDexNoIndex16;
}

JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic,
                                          JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }

  size_t instance_field_count = c->NumInstanceFields();
  size_t static_field_count = c->NumStaticFields();

  expandBufAdd4BE(pReply, instance_field_count + static_field_count);

  for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
    ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) :
        c->GetStaticField(i - instance_field_count);
    expandBufAddFieldId(pReply, ToFieldId(f));
    expandBufAddUtf8String(pReply, f->GetName());
    expandBufAddUtf8String(pReply, f->GetTypeDescriptor());
    if (with_generic) {
      static const char genericSignature[1] = "";
      expandBufAddUtf8String(pReply, genericSignature);
    }
    expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic,
                                           JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }

  expandBufAdd4BE(pReply, c->NumMethods());

  auto* cl = Runtime::Current()->GetClassLinker();
  auto ptr_size = cl->GetImagePointerSize();
  for (ArtMethod& m : c->GetMethods(ptr_size)) {
    expandBufAddMethodId(pReply, ToMethodId(&m));
    expandBufAddUtf8String(pReply, m.GetInterfaceMethodIfProxy(sizeof(void*))->GetName());
    expandBufAddUtf8String(pReply,
                           m.GetInterfaceMethodIfProxy(sizeof(void*))->GetSignature().ToString());
    if (with_generic) {
      const char* generic_signature = "";
      expandBufAddUtf8String(pReply, generic_signature);
    }
    expandBufAdd4BE(pReply, MangleAccessFlags(m.GetAccessFlags()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  Thread* self = Thread::Current();
  StackHandleScope<1> hs(self);
  Handle<mirror::Class> c(hs.NewHandle(DecodeClass(class_id, &error)));
  if (c.Get() == nullptr) {
    return error;
  }
  size_t interface_count = c->NumDirectInterfaces();
  expandBufAdd4BE(pReply, interface_count);
  for (size_t i = 0; i < interface_count; ++i) {
    expandBufAddRefTypeId(pReply,
                          gRegistry->AddRefType(mirror::Class::GetDirectInterface(self, c, i)));
  }
  return JDWP::ERR_NONE;
}

void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply) {
  struct DebugCallbackContext {
    int numItems;
    JDWP::ExpandBuf* pReply;

    static bool Callback(void* context, const DexFile::PositionInfo& entry) {
      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
      expandBufAdd8BE(pContext->pReply, entry.address_);
      expandBufAdd4BE(pContext->pReply, entry.line_);
      pContext->numItems++;
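      // Returning false tells DecodeDebugPositionInfo to keep iterating over
      // the remaining position entries (returning true would stop the walk).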
      return false;
    }
  };
  ArtMethod* m = FromMethodId(method_id);
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  uint64_t start, end;
  if (code_item == nullptr) {
    DCHECK(m->IsNative() || m->IsProxyMethod());
    start = -1;
    end = -1;
  } else {
    start = 0;
    // Return the index of the last instruction.
    end = code_item->insns_size_in_code_units_ - 1;
  }

  expandBufAdd8BE(pReply, start);
  expandBufAdd8BE(pReply, end);

  // Add numLines later.
  size_t numLinesOffset = expandBufGetLength(pReply);
  expandBufAdd4BE(pReply, 0);

  DebugCallbackContext context;
  context.numItems = 0;
  context.pReply = pReply;

  if (code_item != nullptr) {
    m->GetDexFile()->DecodeDebugPositionInfo(code_item, DebugCallbackContext::Callback, &context);
  }

  JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
}

void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic,
                              JDWP::ExpandBuf* pReply) {
  struct DebugCallbackContext {
    ArtMethod* method;
    JDWP::ExpandBuf* pReply;
    size_t variable_count;
    bool with_generic;

    static void Callback(void* context, const DexFile::LocalInfo& entry)
        SHARED_REQUIRES(Locks::mutator_lock_) {
      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);

      uint16_t slot = entry.reg_;
      VLOG(jdwp) << StringPrintf("    %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
                                 pContext->variable_count, entry.start_address_,
                                 entry.end_address_ - entry.start_address_,
                                 entry.name_, entry.descriptor_, entry.signature_, slot,
                                 MangleSlot(slot, pContext->method));

      slot = MangleSlot(slot, pContext->method);

      expandBufAdd8BE(pContext->pReply, entry.start_address_);
      expandBufAddUtf8String(pContext->pReply, entry.name_);
      expandBufAddUtf8String(pContext->pReply, entry.descriptor_);
      if (pContext->with_generic) {
        expandBufAddUtf8String(pContext->pReply, entry.signature_);
      }
      expandBufAdd4BE(pContext->pReply, entry.end_address_ - entry.start_address_);
      expandBufAdd4BE(pContext->pReply, slot);

      ++pContext->variable_count;
    }
  };
  ArtMethod* m = FromMethodId(method_id);

  // arg_count considers doubles and longs to take 2 units.
  // variable_count considers everything to take 1 unit.
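  // For example, shorty "VJI" (void f(long, int)) yields arg_count 3 because
  // the long occupies two register units, while each argument appears only
  // once in the variable table.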
  std::string shorty(m->GetShorty());
  expandBufAdd4BE(pReply, ArtMethod::NumArgRegisters(shorty));

  // We don't know the total number of variables yet, so leave a blank and update it later.
  size_t variable_count_offset = expandBufGetLength(pReply);
  expandBufAdd4BE(pReply, 0);

  DebugCallbackContext context;
  context.method = m;
  context.pReply = pReply;
  context.variable_count = 0;
  context.with_generic = with_generic;

  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item != nullptr) {
    m->GetDexFile()->DecodeDebugLocalInfo(
        code_item, m->IsStatic(), m->GetDexMethodIndex(), DebugCallbackContext::Callback,
        &context);
  }

  JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count);
}

void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
                                  JDWP::ExpandBuf* pReply) {
  ArtMethod* m = FromMethodId(method_id);
  JDWP::JdwpTag tag = BasicTagFromDescriptor(m->GetShorty());
  OutputJValue(tag, return_value, pReply);
}

void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
                           JDWP::ExpandBuf* pReply) {
  ArtField* f = FromFieldId(field_id);
  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
  OutputJValue(tag, field_value, pReply);
}

JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id,
                                  std::vector<uint8_t>* bytecodes) {
  ArtMethod* m = FromMethodId(method_id);
  if (m == nullptr) {
    return JDWP::ERR_INVALID_METHODID;
  }
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    // Native and proxy methods have no code item, hence no bytecodes to report.
    return JDWP::ERR_ABSENT_INFORMATION;
  }
  size_t byte_count = code_item->insns_size_in_code_units_ * 2;
  const uint8_t* begin = reinterpret_cast<const uint8_t*>(code_item->insns_);
  const uint8_t* end = begin + byte_count;
  for (const uint8_t* p = begin; p != end; ++p) {
    bytecodes->push_back(*p);
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId field_id) {
  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
}

JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) {
  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
}

static JValue GetArtFieldValue(ArtField* f, mirror::Object* o)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
  JValue field_value;
  switch (fieldType) {
    case Primitive::kPrimBoolean:
      field_value.SetZ(f->GetBoolean(o));
      return field_value;

    case Primitive::kPrimByte:
      field_value.SetB(f->GetByte(o));
      return field_value;

    case Primitive::kPrimChar:
      field_value.SetC(f->GetChar(o));
      return field_value;

    case Primitive::kPrimShort:
      field_value.SetS(f->GetShort(o));
      return field_value;

    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      // Int and Float must be treated as 32-bit values in JDWP.
      field_value.SetI(f->GetInt(o));
      return field_value;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      // Long and Double must be treated as 64-bit values in JDWP.
      field_value.SetJ(f->GetLong(o));
      return field_value;

    case Primitive::kPrimNot:
      field_value.SetL(f->GetObject(o));
      return field_value;

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Attempt to read from field of type 'void'";
      UNREACHABLE();
  }
  LOG(FATAL) << "Attempt to read from field of unknown type";
  UNREACHABLE();
}

static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
                                         JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
                                         bool is_static)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(ref_type_id, &error);
  if (ref_type_id != 0 && c == nullptr) {
    return error;
  }

  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error);
  if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  ArtField* f = FromFieldId(field_id);

  mirror::Class* receiver_class = c;
  if (receiver_class == nullptr && o != nullptr) {
    receiver_class = o->GetClass();
  }
  // TODO: should we give up now if receiver_class is null?
  if (receiver_class != nullptr && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
    LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class);
    return JDWP::ERR_INVALID_FIELDID;
  }

  // The RI only enforces the static/non-static mismatch in one direction.
  // TODO: should we change the tests and check both?
  if (is_static) {
    if (!f->IsStatic()) {
      return JDWP::ERR_INVALID_FIELDID;
    }
  } else {
    if (f->IsStatic()) {
      LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.GetValues"
                   << " on static field " << PrettyField(f);
    }
  }
  if (f->IsStatic()) {
    o = f->GetDeclaringClass();
  }

  JValue field_value(GetArtFieldValue(f, o));
  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
  Dbg::OutputJValue(tag, &field_value, pReply);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
                                   JDWP::ExpandBuf* pReply) {
  return GetFieldValueImpl(0, object_id, field_id, pReply, false);
}

JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id,
                                         JDWP::ExpandBuf* pReply) {
  return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
}

static JDWP::JdwpError SetArtFieldValue(ArtField* f, mirror::Object* o, uint64_t value, int width)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
  // Debugging only happens at runtime so we know we are not running in a transaction.
  static constexpr bool kNoTransactionMode = false;
  switch (fieldType) {
    case Primitive::kPrimBoolean:
      CHECK_EQ(width, 1);
      f->SetBoolean<kNoTransactionMode>(o, static_cast<uint8_t>(value));
      return JDWP::ERR_NONE;

    case Primitive::kPrimByte:
      CHECK_EQ(width, 1);
      f->SetByte<kNoTransactionMode>(o, static_cast<uint8_t>(value));
      return JDWP::ERR_NONE;

    case Primitive::kPrimChar:
      CHECK_EQ(width, 2);
      f->SetChar<kNoTransactionMode>(o, static_cast<uint16_t>(value));
      return JDWP::ERR_NONE;

    case Primitive::kPrimShort:
      CHECK_EQ(width, 2);
      f->SetShort<kNoTransactionMode>(o, static_cast<int16_t>(value));
      return JDWP::ERR_NONE;

    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      CHECK_EQ(width, 4);
      // Int and Float must be treated as 32-bit values in JDWP.
      f->SetInt<kNoTransactionMode>(o, static_cast<int32_t>(value));
      return JDWP::ERR_NONE;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      CHECK_EQ(width, 8);
      // Long and Double must be treated as 64-bit values in JDWP.
      f->SetLong<kNoTransactionMode>(o, value);
      return JDWP::ERR_NONE;

    case Primitive::kPrimNot: {
      JDWP::JdwpError error;
      mirror::Object* v = Dbg::GetObjectRegistry()->Get<mirror::Object*>(value, &error);
      if (error != JDWP::ERR_NONE) {
        return JDWP::ERR_INVALID_OBJECT;
      }
      if (v != nullptr) {
        mirror::Class* field_type;
        {
          StackHandleScope<2> hs(Thread::Current());
          HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
          HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
          field_type = f->GetType<true>();
        }
        if (!field_type->IsAssignableFrom(v->GetClass())) {
          return JDWP::ERR_INVALID_OBJECT;
        }
      }
      f->SetObject<kNoTransactionMode>(o, v);
      return JDWP::ERR_NONE;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Attempt to write to field of type 'void'";
      UNREACHABLE();
  }
  LOG(FATAL) << "Attempt to write to field of unknown type";
  UNREACHABLE();
}

static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
                                         uint64_t value, int width, bool is_static)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JDWP::JdwpError error;
  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error);
  if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  ArtField* f = FromFieldId(field_id);

  // The RI only enforces the static/non-static mismatch in one direction.
  // TODO: should we change the tests and check both?
  if (is_static) {
    if (!f->IsStatic()) {
      return JDWP::ERR_INVALID_FIELDID;
    }
  } else {
    if (f->IsStatic()) {
      LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.SetValues"
                   << " on static field " << PrettyField(f);
    }
  }
  if (f->IsStatic()) {
    o = f->GetDeclaringClass();
  }
  return SetArtFieldValue(f, o, value, width);
}

JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value,
                                   int width) {
  return SetFieldValueImpl(object_id, field_id, value, width, false);
}

JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) {
  return SetFieldValueImpl(0, field_id, value, width, true);
}

JDWP::JdwpError Dbg::StringToUtf8(JDWP::ObjectId string_id, std::string* str) {
  JDWP::JdwpError error;
  mirror::Object* obj = gRegistry->Get<mirror::Object*>(string_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  if (obj == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    mirror::Class* java_lang_String = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_String);
    if (!java_lang_String->IsAssignableFrom(obj->GetClass())) {
      // This isn't a string.
      return JDWP::ERR_INVALID_STRING;
    }
  }
  *str = obj->AsString()->ToModifiedUtf8();
  return JDWP::ERR_NONE;
}

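// The JDWP wire format for a value is a one-byte type tag followed by a
// big-endian payload. For example, the int 42 is encoded as the five bytes
// 0x49 ('I') 0x00 0x00 0x00 0x2a.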
void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) {
  if (IsPrimitiveTag(tag)) {
    expandBufAdd1(pReply, tag);
    if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) {
      expandBufAdd1(pReply, return_value->GetI());
    } else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) {
      expandBufAdd2BE(pReply, return_value->GetI());
    } else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) {
      expandBufAdd4BE(pReply, return_value->GetI());
    } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
      expandBufAdd8BE(pReply, return_value->GetJ());
    } else {
      CHECK_EQ(tag, JDWP::JT_VOID);
    }
  } else {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    mirror::Object* value = return_value->GetL();
    expandBufAdd1(pReply, TagFromObject(soa, value));
    expandBufAddObjectId(pReply, gRegistry->Add(value));
  }
}

JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string* name) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
    return error;
  }

  // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName.
  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
  CHECK(thread_object != nullptr) << error;
  ArtField* java_lang_Thread_name_field =
      soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
  mirror::String* s =
      reinterpret_cast<mirror::String*>(java_lang_Thread_name_field->GetObject(thread_object));
  if (s != nullptr) {
    *name = s->ToModifiedUtf8();
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroup");
  // Okay, so it's an object, but is it actually a thread?
  DecodeThread(soa, thread_id, &error);
  if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
    // Zombie threads are in the null group.
    expandBufAddObjectId(pReply, JDWP::ObjectId(0));
    error = JDWP::ERR_NONE;
  } else if (error == JDWP::ERR_NONE) {
    mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
    CHECK(c != nullptr);
    ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
    CHECK(f != nullptr);
    mirror::Object* group = f->GetObject(thread_object);
    CHECK(group != nullptr);
    JDWP::ObjectId thread_group_id = gRegistry->Add(group);
    expandBufAddObjectId(pReply, thread_group_id);
  }
  return error;
}

static mirror::Object* DecodeThreadGroup(ScopedObjectAccessUnchecked& soa,
                                         JDWP::ObjectId thread_group_id, JDWP::JdwpError* error)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  mirror::Object* thread_group = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_group_id,
                                                                                error);
  if (*error != JDWP::ERR_NONE) {
    return nullptr;
  }
  if (thread_group == nullptr) {
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }
  mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
  CHECK(c != nullptr);
  if (!c->IsAssignableFrom(thread_group->GetClass())) {
    // This is not a java.lang.ThreadGroup.
    *error = JDWP::ERR_INVALID_THREAD_GROUP;
    return nullptr;
  }
  *error = JDWP::ERR_NONE;
  return thread_group;
}

JDWP::JdwpError Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroupName");
  ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
  CHECK(f != nullptr);
  mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));

  std::string thread_group_name(s->ToModifiedUtf8());
  expandBufAddUtf8String(pReply, thread_group_name);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  mirror::Object* parent;
  {
    ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroupParent");
    ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_parent);
    CHECK(f != nullptr);
    parent = f->GetObject(thread_group);
  }
  JDWP::ObjectId parent_group_id = gRegistry->Add(parent);
  expandBufAddObjectId(pReply, parent_group_id);
  return JDWP::ERR_NONE;
}

static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Object* thread_group,
                                 std::vector<JDWP::ObjectId>* child_thread_group_ids)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  CHECK(thread_group != nullptr);

  // Get the int "ngroups" count of this thread group...
  ArtField* ngroups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_ngroups);
  CHECK(ngroups_field != nullptr);
  const int32_t size = ngroups_field->GetInt(thread_group);
  if (size == 0) {
    return;
  }

  // Get the ThreadGroup[] "groups" out of this thread group...
  ArtField* groups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_groups);
  mirror::Object* groups_array = groups_field->GetObject(thread_group);

  CHECK(groups_array != nullptr);
  CHECK(groups_array->IsObjectArray());

  mirror::ObjectArray<mirror::Object>* groups_array_as_array =
      groups_array->AsObjectArray<mirror::Object>();

  // Copy the first 'size' elements out of the array into the result.
  ObjectRegistry* registry = Dbg::GetObjectRegistry();
  for (int32_t i = 0; i < size; ++i) {
    child_thread_group_ids->push_back(registry->Add(groups_array_as_array->Get(i)));
  }
}

JDWP::JdwpError Dbg::GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
                                            JDWP::ExpandBuf* pReply) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }

  // Add child threads.
  {
    std::vector<JDWP::ObjectId> child_thread_ids;
    GetThreads(thread_group, &child_thread_ids);
    expandBufAdd4BE(pReply, child_thread_ids.size());
    for (JDWP::ObjectId child_thread_id : child_thread_ids) {
      expandBufAddObjectId(pReply, child_thread_id);
    }
  }

  // Add child thread groups.
  {
    std::vector<JDWP::ObjectId> child_thread_groups_ids;
    GetChildThreadGroups(soa, thread_group, &child_thread_groups_ids);
    expandBufAdd4BE(pReply, child_thread_groups_ids.size());
    for (JDWP::ObjectId child_thread_group_id : child_thread_groups_ids) {
      expandBufAddObjectId(pReply, child_thread_group_id);
    }
  }

  return JDWP::ERR_NONE;
}

JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
  mirror::Object* group = f->GetObject(f->GetDeclaringClass());
  return gRegistry->Add(group);
}

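// JDWP defines only five thread statuses (ZOMBIE, RUNNING, SLEEPING, MONITOR,
// WAIT), so the many fine-grained ART states below collapse onto them; in
// particular, every flavor of internal waiting maps to TS_WAIT.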
JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
  switch (state) {
    case kBlocked:
      return JDWP::TS_MONITOR;
    case kNative:
    case kRunnable:
    case kSuspended:
      return JDWP::TS_RUNNING;
    case kSleeping:
      return JDWP::TS_SLEEPING;
    case kStarting:
    case kTerminated:
      return JDWP::TS_ZOMBIE;
    case kTimedWaiting:
    case kWaitingForCheckPointsToRun:
    case kWaitingForDebuggerSend:
    case kWaitingForDebuggerSuspension:
    case kWaitingForDebuggerToAttach:
    case kWaitingForDeoptimization:
    case kWaitingForGcToComplete:
    case kWaitingForGetObjectsAllocated:
    case kWaitingForJniOnLoad:
    case kWaitingForMethodTracingStart:
    case kWaitingForSignalCatcherOutput:
    case kWaitingForVisitObjects:
    case kWaitingInMainDebuggerLoop:
    case kWaitingInMainSignalCatcherLoop:
    case kWaitingPerformingGc:
    case kWaitingWeakGcRootRead:
    case kWaitingForGcThreadFlip:
    case kWaiting:
      return JDWP::TS_WAIT;
      // Don't add a 'default' here so the compiler can spot incompatible enum changes.
  }
  LOG(FATAL) << "Unknown thread state: " << state;
  return JDWP::TS_ZOMBIE;
}

JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus,
                                     JDWP::JdwpSuspendStatus* pSuspendStatus) {
  ScopedObjectAccess soa(Thread::Current());

  *pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED;

  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
      *pThreadStatus = JDWP::TS_ZOMBIE;
      return JDWP::ERR_NONE;
    }
    return error;
  }

  if (IsSuspendedForDebugger(soa, thread)) {
    *pSuspendStatus = JDWP::SUSPEND_STATUS_SUSPENDED;
  }

  *pThreadStatus = ToJdwpThreadStatus(thread->GetState());
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
  ScopedObjectAccess soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
  expandBufAdd4BE(pReply, thread->GetDebugSuspendCount());
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) {
  ScopedObjectAccess soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  thread->Interrupt(soa.Self());
  return JDWP::ERR_NONE;
}

static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
                                   mirror::Object* desired_thread_group, mirror::Object* peer)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  // Do we want threads from all thread groups?
  if (desired_thread_group == nullptr) {
    return true;
  }
  ArtField* thread_group_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
  DCHECK(thread_group_field != nullptr);
  mirror::Object* group = thread_group_field->GetObject(peer);
  return (group == desired_thread_group);
}

void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  std::list<Thread*> all_threads_list;
  {
    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
    all_threads_list = Runtime::Current()->GetThreadList()->GetList();
  }
  for (Thread* t : all_threads_list) {
    if (t == Dbg::GetDebugThread()) {
      // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and
      // query all threads, so it's easier if we just don't tell them about this thread.
      continue;
    }
    if (t->IsStillStarting()) {
      // This thread is being started (and has been registered in the thread list). However, it is
      // not completely started yet so we must ignore it.
      continue;
    }
    mirror::Object* peer = t->GetPeer();
    if (peer == nullptr) {
      // peer might be null if the thread is still starting up. We can't tell the debugger about
      // this thread yet.
      // TODO: if we identified threads to the debugger by their Thread*
      // rather than their peer's mirror::Object*, we could fix this.
      // Doing so might help us report ZOMBIE threads too.
      continue;
    }
    if (IsInDesiredThreadGroup(soa, thread_group, peer)) {
      thread_ids->push_back(gRegistry->Add(peer));
    }
  }
}

static int GetStackDepth(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) {
  struct CountStackDepthVisitor : public StackVisitor {
    explicit CountStackDepthVisitor(Thread* thread_in)
        : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
          depth(0) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (!GetMethod()->IsRuntimeMethod()) {
        ++depth;
      }
      return true;
    }
    size_t depth;
  };

  CountStackDepthVisitor visitor(thread);
  visitor.WalkStack();
  return visitor.depth;
}

JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result) {
  ScopedObjectAccess soa(Thread::Current());
  JDWP::JdwpError error;
  *result = 0;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  *result = GetStackDepth(thread);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
                                     size_t frame_count, JDWP::ExpandBuf* buf) {
  class GetFrameVisitor : public StackVisitor {
   public:
    GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in,
                    JDWP::ExpandBuf* buf_in)
        SHARED_REQUIRES(Locks::mutator_lock_)
        : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
          depth_(0),
          start_frame_(start_frame_in),
          frame_count_(frame_count_in),
          buf_(buf_in) {
      expandBufAdd4BE(buf_, frame_count_);
    }

    bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
      if (GetMethod()->IsRuntimeMethod()) {
        return true;  // The debugger can't do anything useful with a frame that has no Method*.
      }
      if (depth_ >= start_frame_ + frame_count_) {
        return false;
      }
      if (depth_ >= start_frame_) {
        JDWP::FrameId frame_id(GetFrameId());
        JDWP::JdwpLocation location;
        SetJdwpLocation(&location, GetMethod(), GetDexPc());
        VLOG(jdwp) << StringPrintf("    Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location;
        expandBufAdd8BE(buf_, frame_id);
        expandBufAddLocation(buf_, location);
      }
      ++depth_;
      return true;
    }

   private:
    size_t depth_;
    const size_t start_frame_;
    const size_t frame_count_;
    JDWP::ExpandBuf* buf_;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  GetFrameVisitor visitor(thread, start_frame, frame_count, buf);
  visitor.WalkStack();
  return JDWP::ERR_NONE;
}

JDWP::ObjectId Dbg::GetThreadSelfId() {
  return GetThreadId(Thread::Current());
}

JDWP::ObjectId Dbg::GetThreadId(Thread* thread) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  return gRegistry->Add(thread->GetPeer());
}

void Dbg::SuspendVM() {
  Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
}

void Dbg::ResumeVM() {
  Runtime::Current()->GetThreadList()->ResumeAllForDebugger();
}

JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
  Thread* self = Thread::Current();
  ScopedLocalRef<jobject> peer(self->GetJniEnv(), nullptr);
  {
    ScopedObjectAccess soa(self);
    JDWP::JdwpError error;
    peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id, &error)));
  }
  if (peer.get() == nullptr) {
    return JDWP::ERR_THREAD_NOT_ALIVE;
  }
  // Suspend thread to build stack trace.
  bool timed_out;
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  Thread* thread = thread_list->SuspendThreadByPeer(peer.get(), request_suspension, true,
                                                    &timed_out);
  if (thread != nullptr) {
    return JDWP::ERR_NONE;
  } else if (timed_out) {
    return JDWP::ERR_INTERNAL;
  } else {
    return JDWP::ERR_THREAD_NOT_ALIVE;
  }
}

void Dbg::ResumeThread(JDWP::ObjectId thread_id) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id, &error);
  CHECK(peer != nullptr) << error;
  Thread* thread;
  {
    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
    thread = Thread::FromManagedThread(soa, peer);
  }
  if (thread == nullptr) {
    LOG(WARNING) << "No such thread for resume: " << peer;
    return;
  }
  bool needs_resume;
  {
    MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
    needs_resume = thread->GetSuspendCount() > 0;
  }
  if (needs_resume) {
    Runtime::Current()->GetThreadList()->Resume(thread, true);
  }
}

void Dbg::SuspendSelf() {
  Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
}

struct GetThisVisitor : public StackVisitor {
  GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in)
      SHARED_REQUIRES(Locks::mutator_lock_)
      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        this_object(nullptr),
        frame_id(frame_id_in) {}

  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
  // annotalysis.
  virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
    if (frame_id != GetFrameId()) {
      return true;  // continue
    } else {
      this_object = GetThisObject();
      return false;
    }
  }

  mirror::Object* this_object;
  JDWP::FrameId frame_id;
};

JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
                                   JDWP::ObjectId* result) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  std::unique_ptr<Context> context(Context::Create());
  GetThisVisitor visitor(thread, context.get(), frame_id);
  visitor.WalkStack();
  *result = gRegistry->Add(visitor.this_object);
  return JDWP::ERR_NONE;
}

// Walks the stack until we find the frame with the given FrameId.
class FindFrameVisitor FINAL : public StackVisitor {
 public:
  FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
      SHARED_REQUIRES(Locks::mutator_lock_)
      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        frame_id_(frame_id),
        error_(JDWP::ERR_INVALID_FRAMEID) {}

  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
  // annotalysis.
  bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
    if (GetFrameId() != frame_id_) {
      return true;  // Not our frame, carry on.
    }
    ArtMethod* m = GetMethod();
    if (m->IsNative()) {
      // We can't read/write local value from/into native method.
      error_ = JDWP::ERR_OPAQUE_FRAME;
    } else {
      // We found our frame.
      error_ = JDWP::ERR_NONE;
    }
    return false;
  }

  JDWP::JdwpError GetError() const {
    return error_;
  }

 private:
  const JDWP::FrameId frame_id_;
  JDWP::JdwpError error_;

  DISALLOW_COPY_AND_ASSIGN(FindFrameVisitor);
};

JDWP::JdwpError Dbg::GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply) {
  JDWP::ObjectId thread_id = request->ReadThreadId();
  JDWP::FrameId frame_id = request->ReadFrameId();

  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  // Find the frame with the given frame_id.
  std::unique_ptr<Context> context(Context::Create());
  FindFrameVisitor visitor(thread, context.get(), frame_id);
  visitor.WalkStack();
  if (visitor.GetError() != JDWP::ERR_NONE) {
    return visitor.GetError();
  }

  // Read the values from visitor's context.
  int32_t slot_count = request->ReadSigned32("slot count");
  expandBufAdd4BE(pReply, slot_count);     /* "int values" */
  for (int32_t i = 0; i < slot_count; ++i) {
    uint32_t slot = request->ReadUnsigned32("slot");
    JDWP::JdwpTag reqSigByte = request->ReadTag();

    VLOG(jdwp) << "    --> slot " << slot << " " << reqSigByte;

    size_t width = Dbg::GetTagWidth(reqSigByte);
    uint8_t* ptr = expandBufAddSpace(pReply, width + 1);
    error = Dbg::GetLocalValue(visitor, soa, slot, reqSigByte, ptr, width);
    if (error != JDWP::ERR_NONE) {
      return error;
    }
  }
  return JDWP::ERR_NONE;
}

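// Error returned when a stack frame local cannot be read or written.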
2542 constexpr JDWP::JdwpError kStackFrameLocalAccessError = JDWP::ERR_ABSENT_INFORMATION;
2543 
GetStackContextAsString(const StackVisitor & visitor)2544 static std::string GetStackContextAsString(const StackVisitor& visitor)
2545     SHARED_REQUIRES(Locks::mutator_lock_) {
2546   return StringPrintf(" at DEX pc 0x%08x in method %s", visitor.GetDexPc(false),
2547                       PrettyMethod(visitor.GetMethod()).c_str());
2548 }
2549 
FailGetLocalValue(const StackVisitor & visitor,uint16_t vreg,JDWP::JdwpTag tag)2550 static JDWP::JdwpError FailGetLocalValue(const StackVisitor& visitor, uint16_t vreg,
2551                                          JDWP::JdwpTag tag)
2552     SHARED_REQUIRES(Locks::mutator_lock_) {
2553   LOG(ERROR) << "Failed to read " << tag << " local from register v" << vreg
2554              << GetStackContextAsString(visitor);
2555   return kStackFrameLocalAccessError;
2556 }
2557 
JDWP::JdwpError Dbg::GetLocalValue(const StackVisitor& visitor, ScopedObjectAccessUnchecked& soa,
                                   int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
  ArtMethod* m = visitor.GetMethod();
  JDWP::JdwpError error = JDWP::ERR_NONE;
  uint16_t vreg = DemangleSlot(slot, m, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  // TODO: check that the tag is compatible with the actual type of the slot!
  switch (tag) {
    case JDWP::JT_BOOLEAN: {
      CHECK_EQ(width, 1U);
      uint32_t intVal;
      if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      VLOG(jdwp) << "get boolean local " << vreg << " = " << intVal;
      JDWP::Set1(buf + 1, intVal != 0);
      break;
    }
    case JDWP::JT_BYTE: {
      CHECK_EQ(width, 1U);
      uint32_t intVal;
      if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      VLOG(jdwp) << "get byte local " << vreg << " = " << intVal;
      JDWP::Set1(buf + 1, intVal);
      break;
    }
    case JDWP::JT_SHORT:
    case JDWP::JT_CHAR: {
      CHECK_EQ(width, 2U);
      uint32_t intVal;
      if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      VLOG(jdwp) << "get short/char local " << vreg << " = " << intVal;
      JDWP::Set2BE(buf + 1, intVal);
      break;
    }
    case JDWP::JT_INT: {
      CHECK_EQ(width, 4U);
      uint32_t intVal;
      if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      VLOG(jdwp) << "get int local " << vreg << " = " << intVal;
      JDWP::Set4BE(buf + 1, intVal);
      break;
    }
    case JDWP::JT_FLOAT: {
      CHECK_EQ(width, 4U);
      uint32_t intVal;
      if (!visitor.GetVReg(m, vreg, kFloatVReg, &intVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      VLOG(jdwp) << "get float local " << vreg << " = " << intVal;
      JDWP::Set4BE(buf + 1, intVal);
      break;
    }
    case JDWP::JT_ARRAY:
    case JDWP::JT_CLASS_LOADER:
    case JDWP::JT_CLASS_OBJECT:
    case JDWP::JT_OBJECT:
    case JDWP::JT_STRING:
    case JDWP::JT_THREAD:
    case JDWP::JT_THREAD_GROUP: {
      CHECK_EQ(width, sizeof(JDWP::ObjectId));
      uint32_t intVal;
      if (!visitor.GetVReg(m, vreg, kReferenceVReg, &intVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal);
      VLOG(jdwp) << "get " << tag << " object local " << vreg << " = " << o;
      if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
        LOG(FATAL) << StringPrintf("Found invalid object %#" PRIxPTR " in register v%u",
                                   reinterpret_cast<uintptr_t>(o), vreg)
                                   << GetStackContextAsString(visitor);
        UNREACHABLE();
      }
      tag = TagFromObject(soa, o);
      JDWP::SetObjectId(buf + 1, gRegistry->Add(o));
      break;
    }
    case JDWP::JT_DOUBLE: {
      CHECK_EQ(width, 8U);
      uint64_t longVal;
      if (!visitor.GetVRegPair(m, vreg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      VLOG(jdwp) << "get double local " << vreg << " = " << longVal;
      JDWP::Set8BE(buf + 1, longVal);
      break;
    }
    case JDWP::JT_LONG: {
      CHECK_EQ(width, 8U);
      uint64_t longVal;
      if (!visitor.GetVRegPair(m, vreg, kLongLoVReg, kLongHiVReg, &longVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      VLOG(jdwp) << "get long local " << vreg << " = " << longVal;
      JDWP::Set8BE(buf + 1, longVal);
      break;
    }
    default:
      LOG(FATAL) << "Unknown tag " << tag;
      UNREACHABLE();
  }

  // Prepend tag, which may have been updated.
  JDWP::Set1(buf, tag);
  return JDWP::ERR_NONE;
}

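// Handles a JDWP StackFrame.SetValues request: decodes the target thread and frame, then
// writes each (slot, tag, value) triple from the request into that frame.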
JDWP::JdwpError Dbg::SetLocalValues(JDWP::Request* request) {
  JDWP::ObjectId thread_id = request->ReadThreadId();
  JDWP::FrameId frame_id = request->ReadFrameId();

  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  // Find the frame with the given frame_id.
  std::unique_ptr<Context> context(Context::Create());
  FindFrameVisitor visitor(thread, context.get(), frame_id);
  visitor.WalkStack();
  if (visitor.GetError() != JDWP::ERR_NONE) {
    return visitor.GetError();
  }

  // Writes the values into visitor's context.
  int32_t slot_count = request->ReadSigned32("slot count");
  for (int32_t i = 0; i < slot_count; ++i) {
    uint32_t slot = request->ReadUnsigned32("slot");
    JDWP::JdwpTag sigByte = request->ReadTag();
    size_t width = Dbg::GetTagWidth(sigByte);
    uint64_t value = request->ReadValue(width);

    VLOG(jdwp) << "    --> slot " << slot << " " << sigByte << " " << value;
    error = Dbg::SetLocalValue(thread, visitor, slot, sigByte, value, width);
    if (error != JDWP::ERR_NONE) {
      return error;
    }
  }
  return JDWP::ERR_NONE;
}

template<typename T>
static JDWP::JdwpError FailSetLocalValue(const StackVisitor& visitor, uint16_t vreg,
                                         JDWP::JdwpTag tag, T value)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  LOG(ERROR) << "Failed to write " << tag << " local " << value
             << " (0x" << std::hex << value << ") into register v" << vreg
             << GetStackContextAsString(visitor);
  return kStackFrameLocalAccessError;
}

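// Writes 'value' into the local variable slot of the frame the visitor currently points to.
// When the write lands in a compiled frame, the stack may then need to be deoptimized so the
// interpreter picks up the new value (see the comment at the end of this function).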
JDWP::JdwpError Dbg::SetLocalValue(Thread* thread, StackVisitor& visitor, int slot,
                                   JDWP::JdwpTag tag, uint64_t value, size_t width) {
  ArtMethod* m = visitor.GetMethod();
  JDWP::JdwpError error = JDWP::ERR_NONE;
  uint16_t vreg = DemangleSlot(slot, m, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  // TODO: check that the tag is compatible with the actual type of the slot!
  switch (tag) {
    case JDWP::JT_BOOLEAN:
    case JDWP::JT_BYTE:
      CHECK_EQ(width, 1U);
      if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
        return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
      }
      break;
    case JDWP::JT_SHORT:
    case JDWP::JT_CHAR:
      CHECK_EQ(width, 2U);
      if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
        return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
      }
      break;
    case JDWP::JT_INT:
      CHECK_EQ(width, 4U);
      if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
        return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
      }
      break;
    case JDWP::JT_FLOAT:
      CHECK_EQ(width, 4U);
      if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kFloatVReg)) {
        return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
      }
      break;
    case JDWP::JT_ARRAY:
    case JDWP::JT_CLASS_LOADER:
    case JDWP::JT_CLASS_OBJECT:
    case JDWP::JT_OBJECT:
    case JDWP::JT_STRING:
    case JDWP::JT_THREAD:
    case JDWP::JT_THREAD_GROUP: {
      CHECK_EQ(width, sizeof(JDWP::ObjectId));
      mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value),
                                                          &error);
      if (error != JDWP::ERR_NONE) {
        VLOG(jdwp) << tag << " object " << o << " is an invalid object";
        return JDWP::ERR_INVALID_OBJECT;
      }
      if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
                                 kReferenceVReg)) {
        return FailSetLocalValue(visitor, vreg, tag, reinterpret_cast<uintptr_t>(o));
      }
      break;
    }
    case JDWP::JT_DOUBLE: {
      CHECK_EQ(width, 8U);
      if (!visitor.SetVRegPair(m, vreg, value, kDoubleLoVReg, kDoubleHiVReg)) {
        return FailSetLocalValue(visitor, vreg, tag, value);
      }
      break;
    }
    case JDWP::JT_LONG: {
      CHECK_EQ(width, 8U);
      if (!visitor.SetVRegPair(m, vreg, value, kLongLoVReg, kLongHiVReg)) {
        return FailSetLocalValue(visitor, vreg, tag, value);
      }
      break;
    }
    default:
      LOG(FATAL) << "Unknown tag " << tag;
      UNREACHABLE();
  }

  // If we set the local variable in a compiled frame, we need to trigger a deoptimization of
  // the stack so we continue execution with the interpreter using the new value(s) of the updated
  // local variable(s). To achieve this, we install the instrumentation exit stub on each method of
  // the thread's stack. The stub will cause the deoptimization to happen.
  if (!visitor.IsShadowFrame() && thread->HasDebuggerShadowFrames()) {
    Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(thread);
  }

  return JDWP::ERR_NONE;
}

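// Fills in a JDWP event location from a method and dex pc. The method is canonicalized so that
// copies of a default method all report the same location, and native/proxy methods report a
// dex pc of -1 since they have no bytecode.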
static void SetEventLocation(JDWP::EventLocation* location, ArtMethod* m, uint32_t dex_pc)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  DCHECK(location != nullptr);
  if (m == nullptr) {
    memset(location, 0, sizeof(*location));
  } else {
    location->method = GetCanonicalMethod(m);
    location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint32_t>(-1) : dex_pc;
  }
}

void Dbg::PostLocationEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object,
                            int event_flags, const JValue* return_value) {
  if (!IsDebuggerActive()) {
    return;
  }
  DCHECK(m != nullptr);
  DCHECK_EQ(m->IsStatic(), this_object == nullptr);
  JDWP::EventLocation location;
  SetEventLocation(&location, m, dex_pc);

  // We need to be sure no exception is pending when calling JdwpState::PostLocationEvent.
  // This is required to be able to call JNI functions to create JDWP ids. To achieve this,
  // we temporarily clear the current thread's exception (if any) and will restore it after
  // the call.
  // Note: the only way to get a pending exception here is to suspend on a move-exception
  // instruction.
  Thread* const self = Thread::Current();
  StackHandleScope<1> hs(self);
  Handle<mirror::Throwable> pending_exception(hs.NewHandle(self->GetException()));
  self->ClearException();
  if (kIsDebugBuild && pending_exception.Get() != nullptr) {
    const DexFile::CodeItem* code_item = location.method->GetCodeItem();
    const Instruction* instr = Instruction::At(&code_item->insns_[location.dex_pc]);
    CHECK_EQ(Instruction::MOVE_EXCEPTION, instr->Opcode());
  }

  gJdwpState->PostLocationEvent(&location, this_object, event_flags, return_value);

  if (pending_exception.Get() != nullptr) {
    self->SetException(pending_exception.Get());
  }
}

void Dbg::PostFieldAccessEvent(ArtMethod* m, int dex_pc,
                               mirror::Object* this_object, ArtField* f) {
  if (!IsDebuggerActive()) {
    return;
  }
  DCHECK(m != nullptr);
  DCHECK(f != nullptr);
  JDWP::EventLocation location;
  SetEventLocation(&location, m, dex_pc);

  gJdwpState->PostFieldEvent(&location, f, this_object, nullptr, false);
}

void Dbg::PostFieldModificationEvent(ArtMethod* m, int dex_pc,
                                     mirror::Object* this_object, ArtField* f,
                                     const JValue* field_value) {
  if (!IsDebuggerActive()) {
    return;
  }
  DCHECK(m != nullptr);
  DCHECK(f != nullptr);
  DCHECK(field_value != nullptr);
  JDWP::EventLocation location;
  SetEventLocation(&location, m, dex_pc);

  gJdwpState->PostFieldEvent(&location, f, this_object, field_value, true);
}

/**
 * Finds the location where this exception will be caught. We search until we reach the top
 * frame, in which case this exception is considered uncaught.
 */
class CatchLocationFinder : public StackVisitor {
 public:
  CatchLocationFinder(Thread* self, const Handle<mirror::Throwable>& exception, Context* context)
      SHARED_REQUIRES(Locks::mutator_lock_)
    : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
      exception_(exception),
      handle_scope_(self),
      this_at_throw_(handle_scope_.NewHandle<mirror::Object>(nullptr)),
      catch_method_(nullptr),
      throw_method_(nullptr),
      catch_dex_pc_(DexFile::kDexNoIndex),
      throw_dex_pc_(DexFile::kDexNoIndex) {
  }

  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    ArtMethod* method = GetMethod();
    DCHECK(method != nullptr);
    if (method->IsRuntimeMethod()) {
      // Ignore callee save method.
      DCHECK(method->IsCalleeSaveMethod());
      return true;
    }

    uint32_t dex_pc = GetDexPc();
    if (throw_method_ == nullptr) {
      // First Java method found. It is either the method that threw the exception,
      // or the Java native method that is reporting an exception thrown by
      // native code.
      this_at_throw_.Assign(GetThisObject());
      throw_method_ = method;
      throw_dex_pc_ = dex_pc;
    }

    if (dex_pc != DexFile::kDexNoIndex) {
      StackHandleScope<1> hs(GetThread());
      uint32_t found_dex_pc;
      Handle<mirror::Class> exception_class(hs.NewHandle(exception_->GetClass()));
      bool unused_clear_exception;
      found_dex_pc = method->FindCatchBlock(exception_class, dex_pc, &unused_clear_exception);
      if (found_dex_pc != DexFile::kDexNoIndex) {
        catch_method_ = method;
        catch_dex_pc_ = found_dex_pc;
        return false;  // End stack walk.
      }
    }
    return true;  // Continue stack walk.
  }

  ArtMethod* GetCatchMethod() SHARED_REQUIRES(Locks::mutator_lock_) {
    return catch_method_;
  }

  ArtMethod* GetThrowMethod() SHARED_REQUIRES(Locks::mutator_lock_) {
    return throw_method_;
  }

  mirror::Object* GetThisAtThrow() SHARED_REQUIRES(Locks::mutator_lock_) {
    return this_at_throw_.Get();
  }

  uint32_t GetCatchDexPc() const {
    return catch_dex_pc_;
  }

  uint32_t GetThrowDexPc() const {
    return throw_dex_pc_;
  }

 private:
  const Handle<mirror::Throwable>& exception_;
  StackHandleScope<1> handle_scope_;
  MutableHandle<mirror::Object> this_at_throw_;
  ArtMethod* catch_method_;
  ArtMethod* throw_method_;
  uint32_t catch_dex_pc_;
  uint32_t throw_dex_pc_;

  DISALLOW_COPY_AND_ASSIGN(CatchLocationFinder);
};

void Dbg::PostException(mirror::Throwable* exception_object) {
  if (!IsDebuggerActive()) {
    return;
  }
  Thread* const self = Thread::Current();
  StackHandleScope<1> handle_scope(self);
  Handle<mirror::Throwable> h_exception(handle_scope.NewHandle(exception_object));
  std::unique_ptr<Context> context(Context::Create());
  CatchLocationFinder clf(self, h_exception, context.get());
  clf.WalkStack(/* include_transitions */ false);
  JDWP::EventLocation exception_throw_location;
  SetEventLocation(&exception_throw_location, clf.GetThrowMethod(), clf.GetThrowDexPc());
  JDWP::EventLocation exception_catch_location;
  SetEventLocation(&exception_catch_location, clf.GetCatchMethod(), clf.GetCatchDexPc());

  gJdwpState->PostException(&exception_throw_location, h_exception.Get(), &exception_catch_location,
                            clf.GetThisAtThrow());
}

void Dbg::PostClassPrepare(mirror::Class* c) {
  if (!IsDebuggerActive()) {
    return;
  }
  gJdwpState->PostClassPrepare(c);
}

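// Called on "interesting" instrumentation events (dex pc advance, method entry/exit, ...) to
// work out which debugger events, if any, must be reported: a breakpoint hit at this location
// and/or the completion of a single-step on the current thread.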
void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
                         ArtMethod* m, uint32_t dex_pc,
                         int event_flags, const JValue* return_value) {
  if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) {
    return;
  }

  if (IsBreakpoint(m, dex_pc)) {
    event_flags |= kBreakpoint;
  }

  // If the debugger is single-stepping one of our threads, check to
  // see if we're that thread and we've reached a step point.
  const SingleStepControl* single_step_control = thread->GetSingleStepControl();
  if (single_step_control != nullptr) {
    CHECK(!m->IsNative());
    if (single_step_control->GetStepDepth() == JDWP::SD_INTO) {
      // Step into method calls.  We break when the line number
      // or method pointer changes.  If we're in SS_MIN mode, we
      // always stop.
      if (single_step_control->GetMethod() != m) {
        event_flags |= kSingleStep;
        VLOG(jdwp) << "SS new method";
      } else if (single_step_control->GetStepSize() == JDWP::SS_MIN) {
        event_flags |= kSingleStep;
        VLOG(jdwp) << "SS new instruction";
      } else if (single_step_control->ContainsDexPc(dex_pc)) {
        event_flags |= kSingleStep;
        VLOG(jdwp) << "SS new line";
      }
    } else if (single_step_control->GetStepDepth() == JDWP::SD_OVER) {
      // Step over method calls.  We break when the line number is
      // different and the frame depth is <= the original frame
      // depth.  (We can't just compare on the method, because we
      // might get unrolled past it by an exception, and it's tricky
      // to identify recursion.)

      int stack_depth = GetStackDepth(thread);

      if (stack_depth < single_step_control->GetStackDepth()) {
        // Popped up one or more frames, always trigger.
        event_flags |= kSingleStep;
        VLOG(jdwp) << "SS method pop";
      } else if (stack_depth == single_step_control->GetStackDepth()) {
        // Same depth, see if we moved.
        if (single_step_control->GetStepSize() == JDWP::SS_MIN) {
          event_flags |= kSingleStep;
          VLOG(jdwp) << "SS new instruction";
        } else if (single_step_control->ContainsDexPc(dex_pc)) {
          event_flags |= kSingleStep;
          VLOG(jdwp) << "SS new line";
        }
      }
    } else {
      CHECK_EQ(single_step_control->GetStepDepth(), JDWP::SD_OUT);
      // Return from the current method.  We break when the frame
      // depth pops up.

      // This differs from the "method exit" break in that it stops
      // with the PC at the next instruction in the returned-to
      // function, rather than the end of the returning function.

      int stack_depth = GetStackDepth(thread);
      if (stack_depth < single_step_control->GetStackDepth()) {
        event_flags |= kSingleStep;
        VLOG(jdwp) << "SS method pop";
      }
    }
  }

  // If there's something interesting going on, see if it matches one
  // of the debugger filters.
  if (event_flags != 0) {
    Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, return_value);
  }
}

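// Returns the reference counter tracking how many deoptimization requests currently rely on
// the given instrumentation event, or null if the event is not tracked.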
size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) {
  switch (instrumentation_event) {
    case instrumentation::Instrumentation::kMethodEntered:
      return &method_enter_event_ref_count_;
    case instrumentation::Instrumentation::kMethodExited:
      return &method_exit_event_ref_count_;
    case instrumentation::Instrumentation::kDexPcMoved:
      return &dex_pc_change_event_ref_count_;
    case instrumentation::Instrumentation::kFieldRead:
      return &field_read_event_ref_count_;
    case instrumentation::Instrumentation::kFieldWritten:
      return &field_write_event_ref_count_;
    case instrumentation::Instrumentation::kExceptionCaught:
      return &exception_catch_event_ref_count_;
    default:
      return nullptr;
  }
}

// Process request while all mutator threads are suspended.
void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
  switch (request.GetKind()) {
    case DeoptimizationRequest::kNothing:
      LOG(WARNING) << "Ignoring empty deoptimization request.";
      break;
    case DeoptimizationRequest::kRegisterForEvent:
      VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x",
                                 request.InstrumentationEvent());
      instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent());
      instrumentation_events_ |= request.InstrumentationEvent();
      break;
    case DeoptimizationRequest::kUnregisterForEvent:
      VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x",
                                 request.InstrumentationEvent());
      instrumentation->RemoveListener(&gDebugInstrumentationListener,
                                      request.InstrumentationEvent());
      instrumentation_events_ &= ~request.InstrumentationEvent();
      break;
    case DeoptimizationRequest::kFullDeoptimization:
      VLOG(jdwp) << "Deoptimize the world ...";
      instrumentation->DeoptimizeEverything(kDbgInstrumentationKey);
      VLOG(jdwp) << "Deoptimize the world DONE";
      break;
    case DeoptimizationRequest::kFullUndeoptimization:
      VLOG(jdwp) << "Undeoptimize the world ...";
      instrumentation->UndeoptimizeEverything(kDbgInstrumentationKey);
      VLOG(jdwp) << "Undeoptimize the world DONE";
      break;
    case DeoptimizationRequest::kSelectiveDeoptimization:
      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " ...";
      instrumentation->Deoptimize(request.Method());
      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " DONE";
      break;
    case DeoptimizationRequest::kSelectiveUndeoptimization:
      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " ...";
      instrumentation->Undeoptimize(request.Method());
      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " DONE";
      break;
    default:
      LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind();
      break;
  }
}

void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
  if (req.GetKind() == DeoptimizationRequest::kNothing) {
    // Nothing to do.
    return;
  }
  MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
  RequestDeoptimizationLocked(req);
}

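// Instrumentation events and full deoptimization are reference counted, so an actual request
// is only queued on the 0 <-> 1 transitions of the corresponding counter; selective
// (un)deoptimization requests are always queued.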
void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
  switch (req.GetKind()) {
    case DeoptimizationRequest::kRegisterForEvent: {
      DCHECK_NE(req.InstrumentationEvent(), 0u);
      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
                                                req.InstrumentationEvent());
      if (*counter == 0) {
        VLOG(jdwp) << StringPrintf("Queue request #%zd to start listening to instrumentation event 0x%x",
                                   deoptimization_requests_.size(), req.InstrumentationEvent());
        deoptimization_requests_.push_back(req);
      }
      *counter = *counter + 1;
      break;
    }
    case DeoptimizationRequest::kUnregisterForEvent: {
      DCHECK_NE(req.InstrumentationEvent(), 0u);
      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
                                                req.InstrumentationEvent());
      *counter = *counter - 1;
      if (*counter == 0) {
        VLOG(jdwp) << StringPrintf("Queue request #%zd to stop listening to instrumentation event 0x%x",
                                   deoptimization_requests_.size(), req.InstrumentationEvent());
        deoptimization_requests_.push_back(req);
      }
      break;
    }
    case DeoptimizationRequest::kFullDeoptimization: {
      DCHECK(req.Method() == nullptr);
      if (full_deoptimization_event_count_ == 0) {
        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
                   << " for full deoptimization";
        deoptimization_requests_.push_back(req);
      }
      ++full_deoptimization_event_count_;
      break;
    }
    case DeoptimizationRequest::kFullUndeoptimization: {
      DCHECK(req.Method() == nullptr);
      DCHECK_GT(full_deoptimization_event_count_, 0U);
      --full_deoptimization_event_count_;
      if (full_deoptimization_event_count_ == 0) {
        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
                   << " for full undeoptimization";
        deoptimization_requests_.push_back(req);
      }
      break;
    }
    case DeoptimizationRequest::kSelectiveDeoptimization: {
      DCHECK(req.Method() != nullptr);
      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
                 << " for deoptimization of " << PrettyMethod(req.Method());
      deoptimization_requests_.push_back(req);
      break;
    }
    case DeoptimizationRequest::kSelectiveUndeoptimization: {
      DCHECK(req.Method() != nullptr);
      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
                 << " for undeoptimization of " << PrettyMethod(req.Method());
      deoptimization_requests_.push_back(req);
      break;
    }
    default: {
      LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind();
      break;
    }
  }
}

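// Applies all pending deoptimization requests. Since (un)deoptimizing methods requires all
// mutator threads to be suspended, requests are batched and processed here under a single
// suspend-all, inside a GC critical section.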
void Dbg::ManageDeoptimization() {
  Thread* const self = Thread::Current();
  {
    // Avoid suspend/resume if there is no pending request.
    MutexLock mu(self, *Locks::deoptimization_lock_);
    if (deoptimization_requests_.empty()) {
      return;
    }
  }
  CHECK_EQ(self->GetState(), kRunnable);
  ScopedThreadSuspension sts(self, kWaitingForDeoptimization);
  // Required for ProcessDeoptimizationRequest.
  gc::ScopedGCCriticalSection gcs(self,
                                  gc::kGcCauseInstrumentation,
                                  gc::kCollectorTypeInstrumentation);
  // We need to suspend mutator threads first.
  ScopedSuspendAll ssa(__FUNCTION__);
  const ThreadState old_state = self->SetStateUnsafe(kRunnable);
  {
    MutexLock mu(self, *Locks::deoptimization_lock_);
    size_t req_index = 0;
    for (DeoptimizationRequest& request : deoptimization_requests_) {
      VLOG(jdwp) << "Process deoptimization request #" << req_index++;
      ProcessDeoptimizationRequest(request);
    }
    deoptimization_requests_.clear();
  }
  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
}

static const Breakpoint* FindFirstBreakpointForMethod(ArtMethod* m)
    SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) {
  for (Breakpoint& breakpoint : gBreakpoints) {
    if (breakpoint.IsInMethod(m)) {
      return &breakpoint;
    }
  }
  return nullptr;
}

bool Dbg::MethodHasAnyBreakpoints(ArtMethod* method) {
  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
  return FindFirstBreakpointForMethod(method) != nullptr;
}

// Sanity checks all existing breakpoints on the same method.
static void SanityCheckExistingBreakpoints(ArtMethod* m,
                                           DeoptimizationRequest::Kind deoptimization_kind)
    SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) {
  for (const Breakpoint& breakpoint : gBreakpoints) {
    if (breakpoint.IsInMethod(m)) {
      CHECK_EQ(deoptimization_kind, breakpoint.GetDeoptimizationKind());
    }
  }
  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
  if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
    // We should have deoptimized everything but not "selectively" deoptimized this method.
    CHECK(instrumentation->AreAllMethodsDeoptimized());
    CHECK(!instrumentation->IsDeoptimized(m));
  } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
    // We should have "selectively" deoptimized this method.
    // Note: while we have not deoptimized everything for this method, we may have done it for
    // another event.
    CHECK(instrumentation->IsDeoptimized(m));
  } else {
    // This method does not require deoptimization.
    CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
    CHECK(!instrumentation->IsDeoptimized(m));
  }
}

// Returns the deoptimization kind required to set a breakpoint in a method.
// If a breakpoint has already been set, we also return the first breakpoint
// through the given 'existing_brkpt' pointer.
static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self,
                                                                 ArtMethod* m,
                                                                 const Breakpoint** existing_brkpt)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  if (!Dbg::RequiresDeoptimization()) {
    // We already run in interpreter-only mode so we don't need to deoptimize anything.
    VLOG(jdwp) << "No need for deoptimization when fully running with interpreter for method "
               << PrettyMethod(m);
    return DeoptimizationRequest::kNothing;
  }
  const Breakpoint* first_breakpoint;
  {
    ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
    first_breakpoint = FindFirstBreakpointForMethod(m);
    *existing_brkpt = first_breakpoint;
  }

  if (first_breakpoint == nullptr) {
    // There is no breakpoint on this method yet: we need to deoptimize. If this method is a
    // default method, we deoptimize everything; otherwise we deoptimize only this method. We
    // deoptimize everything for default methods because we do not know everywhere their copies
    // are used, so some of the copies could otherwise be missed.
    // TODO Deoptimizing on default methods might not be necessary in all cases.
    bool need_full_deoptimization = m->IsDefault();
    if (need_full_deoptimization) {
      VLOG(jdwp) << "Need full deoptimization because of copying of method "
                 << PrettyMethod(m);
      return DeoptimizationRequest::kFullDeoptimization;
    } else {
      // We don't need to deoptimize if the method has not been compiled.
      const bool is_compiled = m->HasAnyCompiledCode();
      if (is_compiled) {
        VLOG(jdwp) << "Need selective deoptimization for compiled method " << PrettyMethod(m);
        return DeoptimizationRequest::kSelectiveDeoptimization;
      } else {
        // Method is not compiled: we don't need to deoptimize.
        VLOG(jdwp) << "No need for deoptimization for non-compiled method " << PrettyMethod(m);
        return DeoptimizationRequest::kNothing;
      }
    }
  } else {
    // There is at least one breakpoint for this method: we don't need to deoptimize.
    // Let's check that all breakpoints are configured the same way for deoptimization.
    VLOG(jdwp) << "Breakpoint already set: no deoptimization is required";
    DeoptimizationRequest::Kind deoptimization_kind = first_breakpoint->GetDeoptimizationKind();
    if (kIsDebugBuild) {
      ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
      SanityCheckExistingBreakpoints(m, deoptimization_kind);
    }
    return DeoptimizationRequest::kNothing;
  }
}

// Installs a breakpoint at the specified location. Also indicates through the deoptimization
// request if we need to deoptimize.
void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
  Thread* const self = Thread::Current();
  ArtMethod* m = FromMethodId(location->method_id);
  DCHECK(m != nullptr) << "No method for method id " << location->method_id;

  const Breakpoint* existing_breakpoint = nullptr;
  const DeoptimizationRequest::Kind deoptimization_kind =
      GetRequiredDeoptimizationKind(self, m, &existing_breakpoint);
  req->SetKind(deoptimization_kind);
  if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
    req->SetMethod(m);
  } else {
    CHECK(deoptimization_kind == DeoptimizationRequest::kNothing ||
          deoptimization_kind == DeoptimizationRequest::kFullDeoptimization);
    req->SetMethod(nullptr);
  }

  {
    WriterMutexLock mu(self, *Locks::breakpoint_lock_);
    // If there is at least one existing breakpoint on the same method, the new breakpoint
    // must have the same deoptimization kind as the existing breakpoint(s).
    DeoptimizationRequest::Kind breakpoint_deoptimization_kind;
    if (existing_breakpoint != nullptr) {
      breakpoint_deoptimization_kind = existing_breakpoint->GetDeoptimizationKind();
    } else {
      breakpoint_deoptimization_kind = deoptimization_kind;
    }
    gBreakpoints.push_back(Breakpoint(m, location->dex_pc, breakpoint_deoptimization_kind));
    VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
               << gBreakpoints[gBreakpoints.size() - 1];
  }
}

// Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
// request if we need to undeoptimize.
void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
  WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
  ArtMethod* m = FromMethodId(location->method_id);
  DCHECK(m != nullptr) << "No method for method id " << location->method_id;
  DeoptimizationRequest::Kind deoptimization_kind = DeoptimizationRequest::kNothing;
  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
    if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].IsInMethod(m)) {
      VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
      deoptimization_kind = gBreakpoints[i].GetDeoptimizationKind();
      DCHECK_EQ(deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization,
                Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
      gBreakpoints.erase(gBreakpoints.begin() + i);
      break;
    }
  }
  const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
  if (existing_breakpoint == nullptr) {
    // There is no more breakpoint on this method: we need to undeoptimize.
    if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
      // This method required full deoptimization: we need to undeoptimize everything.
      req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
      req->SetMethod(nullptr);
    } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
      // This method required selective deoptimization: we need to undeoptimize only that method.
      req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
      req->SetMethod(m);
    } else {
      // This method had no need for deoptimization: do nothing.
      CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
      req->SetKind(DeoptimizationRequest::kNothing);
      req->SetMethod(nullptr);
    }
  } else {
    // There is at least one breakpoint for this method: we don't need to undeoptimize.
    req->SetKind(DeoptimizationRequest::kNothing);
    req->SetMethod(nullptr);
    if (kIsDebugBuild) {
      SanityCheckExistingBreakpoints(m, deoptimization_kind);
    }
  }
}

bool Dbg::IsForcedInterpreterNeededForCallingImpl(Thread* thread, ArtMethod* m) {
  const SingleStepControl* const ssc = thread->GetSingleStepControl();
  if (ssc == nullptr) {
    // If we are not single-stepping, then we don't have to force interpreter.
    return false;
  }
  if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
    // If we are in interpreter only mode, then we don't have to force interpreter.
    return false;
  }

  if (!m->IsNative() && !m->IsProxyMethod()) {
    // If we want to step into a method, then we have to force interpreter on that call.
    if (ssc->GetStepDepth() == JDWP::SD_INTO) {
      return true;
    }
  }
  return false;
}

bool Dbg::IsForcedInterpreterNeededForResolutionImpl(Thread* thread, ArtMethod* m) {
  instrumentation::Instrumentation* const instrumentation =
      Runtime::Current()->GetInstrumentation();
  // If we are in interpreter only mode, then we don't have to force interpreter.
  if (instrumentation->InterpretOnly()) {
    return false;
  }
  // We can only interpret pure Java methods.
  if (m->IsNative() || m->IsProxyMethod()) {
    return false;
  }
  const SingleStepControl* const ssc = thread->GetSingleStepControl();
  if (ssc != nullptr) {
    // If we want to step into a method, then we have to force interpreter on that call.
    if (ssc->GetStepDepth() == JDWP::SD_INTO) {
      return true;
    }
    // If we are stepping out of a static initializer that was implicitly invoked by calling
    // a static method, by issuing a step in or step over, then we need to step into that
    // method. Having a lower stack depth than the one the single step control has indicates
    // that the step originates from the static initializer.
    if (ssc->GetStepDepth() != JDWP::SD_OUT &&
        ssc->GetStackDepth() > GetStackDepth(thread)) {
      return true;
    }
  }
  // There are cases where we have to force interpreter on deoptimized methods,
  // because in some cases the call will not be performed by invoking an entry
  // point that has been replaced by the deoptimization, but instead by directly
  // invoking the compiled code of the method, for example.
  return instrumentation->IsDeoptimized(m);
}

bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, ArtMethod* m) {
  // The upcall can be null and in that case we don't need to do anything.
  if (m == nullptr) {
    return false;
  }
  instrumentation::Instrumentation* const instrumentation =
      Runtime::Current()->GetInstrumentation();
  // If we are in interpreter only mode, then we don't have to force interpreter.
  if (instrumentation->InterpretOnly()) {
    return false;
  }
  // We can only interpret pure Java methods.
  if (m->IsNative() || m->IsProxyMethod()) {
    return false;
  }
  const SingleStepControl* const ssc = thread->GetSingleStepControl();
  if (ssc != nullptr) {
    // If we are stepping out of a static initializer that was implicitly invoked by calling
    // a static method, by issuing a step out, then we need to step into the caller of that
    // method. Having a lower stack depth than the one the single step control has indicates
    // that the step originates from the static initializer.
    if (ssc->GetStepDepth() == JDWP::SD_OUT &&
        ssc->GetStackDepth() > GetStackDepth(thread)) {
      return true;
    }
  }
  // If we are returning from a static initializer, that was implicitly
  // invoked by calling a static method and the caller is deoptimized,
  // then we have to deoptimize the stack without forcing interpreter
  // on the static method that was called originally. This problem can
  // be solved easily by forcing instrumentation on the called method,
  // because the instrumentation exit hook will recognise the need of
  // stack deoptimization by calling IsForcedInterpreterNeededForUpcall.
  return instrumentation->IsDeoptimized(m);
}

bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m) {
  // The upcall can be null and in that case we don't need to do anything.
  if (m == nullptr) {
    return false;
  }
  instrumentation::Instrumentation* const instrumentation =
      Runtime::Current()->GetInstrumentation();
  // If we are in interpreter only mode, then we don't have to force interpreter.
  if (instrumentation->InterpretOnly()) {
    return false;
  }
  // We can only interpret pure Java methods.
  if (m->IsNative() || m->IsProxyMethod()) {
    return false;
  }
  const SingleStepControl* const ssc = thread->GetSingleStepControl();
  if (ssc != nullptr) {
    // The debugger is not interested in what is happening under the level
    // of the step, thus we only force interpreter when we are not below
    // the step.
    if (ssc->GetStackDepth() >= GetStackDepth(thread)) {
      return true;
    }
  }
  if (thread->HasDebuggerShadowFrames()) {
    // We need to deoptimize the stack for the exception handling flow so that
    // we don't miss any deoptimization that should be done when there are
    // debugger shadow frames.
    return true;
  }
  // We have to require stack deoptimization if the upcall is deoptimized.
  return instrumentation->IsDeoptimized(m);
}

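// Walks the stack to decide whether it must be deoptimized to handle an exception thrown from
// compiled code: this is the case if we find a compiled frame whose method is deoptimized or
// has a debugger shadow frame, or any compiled frame while instrumentation requires
// interpret-only execution.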
class NeedsDeoptimizationVisitor : public StackVisitor {
 public:
  explicit NeedsDeoptimizationVisitor(Thread* self)
      SHARED_REQUIRES(Locks::mutator_lock_)
    : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
      needs_deoptimization_(false) {}

  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    // The visitor is meant to be used when handling an exception from compiled code only.
    CHECK(!IsShadowFrame()) << "We only expect to visit compiled frame: " << PrettyMethod(GetMethod());
    ArtMethod* method = GetMethod();
    if (method == nullptr) {
      // We reached an upcall and don't need to deoptimize this part of the stack
      // (ManagedFragment), so we can stop the visit.
      DCHECK(!needs_deoptimization_);
      return false;
    }
    if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
      // We found a compiled frame in the stack but instrumentation is set to interpret
      // everything: we need to deoptimize.
      needs_deoptimization_ = true;
      return false;
    }
    if (Runtime::Current()->GetInstrumentation()->IsDeoptimized(method)) {
      // We found a deoptimized method in the stack.
      needs_deoptimization_ = true;
      return false;
    }
    ShadowFrame* frame = GetThread()->FindDebuggerShadowFrame(GetFrameId());
    if (frame != nullptr) {
      // The debugger allocated a ShadowFrame to update a variable in the stack: we need to
      // deoptimize the stack to execute (and deallocate) this frame.
      needs_deoptimization_ = true;
      return false;
    }
    return true;
  }

  bool NeedsDeoptimization() const {
    return needs_deoptimization_;
  }

 private:
  // Do we need to deoptimize the stack?
  bool needs_deoptimization_;

  DISALLOW_COPY_AND_ASSIGN(NeedsDeoptimizationVisitor);
};

// Do we need to deoptimize the stack to handle an exception?
bool Dbg::IsForcedInterpreterNeededForExceptionImpl(Thread* thread) {
  const SingleStepControl* const ssc = thread->GetSingleStepControl();
  if (ssc != nullptr) {
    // We deopt to step into the catch handler.
    return true;
  }
  // Deoptimization is required if at least one method in the stack needs it. However we
  // skip frames that will be unwound (thus not executed).
  NeedsDeoptimizationVisitor visitor(thread);
  visitor.WalkStack(true);  // includes upcall.
  return visitor.NeedsDeoptimization();
}

// Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
// cause suspension if the thread is the current thread.
class ScopedDebuggerThreadSuspension {
 public:
  ScopedDebuggerThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
      REQUIRES(!Locks::thread_list_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_) :
      thread_(nullptr),
      error_(JDWP::ERR_NONE),
      self_suspend_(false),
      other_suspend_(false) {
    ScopedObjectAccessUnchecked soa(self);
    thread_ = DecodeThread(soa, thread_id, &error_);
    if (error_ == JDWP::ERR_NONE) {
      if (thread_ == soa.Self()) {
        self_suspend_ = true;
      } else {
        Thread* suspended_thread;
        {
          ScopedThreadSuspension sts(self, kWaitingForDebuggerSuspension);
          jobject thread_peer = Dbg::GetObjectRegistry()->GetJObject(thread_id);
          bool timed_out;
          ThreadList* const thread_list = Runtime::Current()->GetThreadList();
          suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true, &timed_out);
        }
        if (suspended_thread == nullptr) {
          // Thread terminated from under us while suspending.
          error_ = JDWP::ERR_INVALID_THREAD;
        } else {
          CHECK_EQ(suspended_thread, thread_);
          other_suspend_ = true;
        }
      }
    }
  }

  Thread* GetThread() const {
    return thread_;
  }

  JDWP::JdwpError GetError() const {
    return error_;
  }

  ~ScopedDebuggerThreadSuspension() {
    if (other_suspend_) {
      Runtime::Current()->GetThreadList()->Resume(thread_, true);
    }
  }

 private:
  Thread* thread_;
  JDWP::JdwpError error_;
  bool self_suspend_;
  bool other_suspend_;
};

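// Sets up single-stepping on the given thread: records the current method, line number and
// stack depth, and precomputes the set of dex pcs belonging to the current source line so the
// step completion checks in UpdateDebugger can detect line changes.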
JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size,
                                   JDWP::JdwpStepDepth step_depth) {
  Thread* self = Thread::Current();
  ScopedDebuggerThreadSuspension sts(self, thread_id);
  if (sts.GetError() != JDWP::ERR_NONE) {
    return sts.GetError();
  }

  // Work out what ArtMethod* we're in, the current line number, and how deep the stack currently
  // is for step-out.
  struct SingleStepStackVisitor : public StackVisitor {
    explicit SingleStepStackVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
        : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
          stack_depth(0),
          method(nullptr),
          line_number(-1) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      ArtMethod* m = GetMethod();
      if (!m->IsRuntimeMethod()) {
        ++stack_depth;
        if (method == nullptr) {
          mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
          method = m;
          if (dex_cache != nullptr) {
            const DexFile& dex_file = *dex_cache->GetDexFile();
            line_number = dex_file.GetLineNumFromPC(m, GetDexPc());
          }
        }
      }
      return true;
    }

    int stack_depth;
    ArtMethod* method;
    int32_t line_number;
  };

  Thread* const thread = sts.GetThread();
  SingleStepStackVisitor visitor(thread);
  visitor.WalkStack();

  // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
  struct DebugCallbackContext {
    DebugCallbackContext(SingleStepControl* single_step_control_cb,
                         int32_t line_number_cb, const DexFile::CodeItem* code_item)
        : single_step_control_(single_step_control_cb), line_number_(line_number_cb),
          code_item_(code_item), last_pc_valid(false), last_pc(0) {
    }

    static bool Callback(void* raw_context, const DexFile::PositionInfo& entry) {
      DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
      if (static_cast<int32_t>(entry.line_) == context->line_number_) {
        if (!context->last_pc_valid) {
          // Everything from this address until the next line change is ours.
          context->last_pc = entry.address_;
          context->last_pc_valid = true;
        }
        // Otherwise, if we're already in a valid range for this line,
        // just keep going (shouldn't really happen)...
      } else if (context->last_pc_valid) {  // and the line number is new
        // Add everything from the last entry up until here to the set
        for (uint32_t dex_pc = context->last_pc; dex_pc < entry.address_; ++dex_pc) {
          context->single_step_control_->AddDexPc(dex_pc);
        }
        context->last_pc_valid = false;
      }
      return false;  // There may be multiple entries for any given line.
    }

    ~DebugCallbackContext() {
      // If the line number was the last in the position table...
      if (last_pc_valid) {
        size_t end = code_item_->insns_size_in_code_units_;
        for (uint32_t dex_pc = last_pc; dex_pc < end; ++dex_pc) {
          single_step_control_->AddDexPc(dex_pc);
        }
      }
    }

    SingleStepControl* const single_step_control_;
    const int32_t line_number_;
    const DexFile::CodeItem* const code_item_;
    bool last_pc_valid;
    uint32_t last_pc;
  };

  // Allocate single step.
  SingleStepControl* single_step_control =
      new (std::nothrow) SingleStepControl(step_size, step_depth,
                                           visitor.stack_depth, visitor.method);
  if (single_step_control == nullptr) {
    LOG(ERROR) << "Failed to allocate SingleStepControl";
    return JDWP::ERR_OUT_OF_MEMORY;
  }

  ArtMethod* m = single_step_control->GetMethod();
  const int32_t line_number = visitor.line_number;
  // Note: if the thread is not running Java code (pure native thread), there is no "current"
  // method on the stack (and no line number either).
  if (m != nullptr && !m->IsNative()) {
    const DexFile::CodeItem* const code_item = m->GetCodeItem();
    DebugCallbackContext context(single_step_control, line_number, code_item);
    m->GetDexFile()->DecodeDebugPositionInfo(code_item, DebugCallbackContext::Callback, &context);
  }

  // Activate single-step in the thread.
  thread->ActivateSingleStepControl(single_step_control);

  if (VLOG_IS_ON(jdwp)) {
    VLOG(jdwp) << "Single-step thread: " << *thread;
    VLOG(jdwp) << "Single-step step size: " << single_step_control->GetStepSize();
    VLOG(jdwp) << "Single-step step depth: " << single_step_control->GetStepDepth();
    VLOG(jdwp) << "Single-step current method: " << PrettyMethod(single_step_control->GetMethod());
    VLOG(jdwp) << "Single-step current line: " << line_number;
    VLOG(jdwp) << "Single-step current stack depth: " << single_step_control->GetStackDepth();
    VLOG(jdwp) << "Single-step dex_pc values:";
    for (uint32_t dex_pc : single_step_control->GetDexPcs()) {
      VLOG(jdwp) << StringPrintf(" %#x", dex_pc);
    }
  }

  return JDWP::ERR_NONE;
}

void Dbg::UnconfigureStep(JDWP::ObjectId thread_id) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error == JDWP::ERR_NONE) {
    thread->DeactivateSingleStepControl();
  }
}

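// Maps a JDWP tag to the corresponding Java shorty character; all reference tags map to 'L'.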
JdwpTagToShortyChar(JDWP::JdwpTag tag)3797 static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
3798   switch (tag) {
3799     default:
3800       LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag);
3801       UNREACHABLE();
3802 
3803     // Primitives.
3804     case JDWP::JT_BYTE:    return 'B';
3805     case JDWP::JT_CHAR:    return 'C';
3806     case JDWP::JT_FLOAT:   return 'F';
3807     case JDWP::JT_DOUBLE:  return 'D';
3808     case JDWP::JT_INT:     return 'I';
3809     case JDWP::JT_LONG:    return 'J';
3810     case JDWP::JT_SHORT:   return 'S';
3811     case JDWP::JT_VOID:    return 'V';
3812     case JDWP::JT_BOOLEAN: return 'Z';
3813 
3814     // Reference types.
3815     case JDWP::JT_ARRAY:
3816     case JDWP::JT_OBJECT:
3817     case JDWP::JT_STRING:
3818     case JDWP::JT_THREAD:
3819     case JDWP::JT_THREAD_GROUP:
3820     case JDWP::JT_CLASS_LOADER:
3821     case JDWP::JT_CLASS_OBJECT:
3822       return 'L';
3823   }
3824 }
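
// For illustration: a debugger invoking a method whose shorty is "IL" (an int
// return and one reference parameter) is expected to send one argument tagged
// JT_OBJECT; JdwpTagToShortyChar maps that tag to 'L', which
// PrepareInvokeMethod below checks against shorty[i + 1] for each argument.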

JDWP::JdwpError Dbg::PrepareInvokeMethod(uint32_t request_id, JDWP::ObjectId thread_id,
                                         JDWP::ObjectId object_id, JDWP::RefTypeId class_id,
                                         JDWP::MethodId method_id, uint32_t arg_count,
                                         uint64_t arg_values[], JDWP::JdwpTag* arg_types,
                                         uint32_t options) {
  Thread* const self = Thread::Current();
  CHECK_EQ(self, GetDebugThread()) << "This must be called by the JDWP thread";
  const bool resume_all_threads = ((options & JDWP::INVOKE_SINGLE_THREADED) == 0);

  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  Thread* targetThread = nullptr;
  {
    ScopedObjectAccessUnchecked soa(self);
    JDWP::JdwpError error;
    targetThread = DecodeThread(soa, thread_id, &error);
    if (error != JDWP::ERR_NONE) {
      LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id;
      return error;
    }
    if (targetThread->GetInvokeReq() != nullptr) {
      // Thread is already invoking a method on behalf of the debugger.
      LOG(ERROR) << "InvokeMethod request for thread already invoking a method: " << *targetThread;
      return JDWP::ERR_ALREADY_INVOKING;
    }
    if (!targetThread->IsReadyForDebugInvoke()) {
      // Thread is not suspended by an event so it cannot invoke a method.
      LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread;
      return JDWP::ERR_INVALID_THREAD;
    }

    /*
     * According to the JDWP specs, we are expected to resume all threads (or only the
     * target thread) once. So if a thread has been suspended more than once (either by
     * the debugger for an event or by the runtime for GC), it will remain suspended before
     * the invoke is executed. This means the debugger is responsible for properly resuming
     * all the threads it has suspended so the target thread can execute the method.
     *
     * However, for compatibility reasons with older debuggers (like Eclipse), we
     * fully resume all threads (by canceling *all* debugger suspensions) when the debugger
     * wants us to resume all threads. This avoids ending up in a deadlock situation.
     *
     * On the other hand, if we are asked to only resume the target thread, then we follow the
     * JDWP specs by resuming that thread only once. This means the thread will remain suspended
     * if it has been suspended more than once before the invoke (and again, it is the
     * responsibility of the debugger to properly resume that thread before invoking a method).
     */
    int suspend_count;
    {
      MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
      suspend_count = targetThread->GetSuspendCount();
    }
    if (suspend_count > 1 && resume_all_threads) {
      // The target thread will remain suspended even after we resume it. Let's emit a warning
      // to indicate the invoke won't be executed until the thread is resumed.
      LOG(WARNING) << *targetThread << " suspended more than once (suspend count == "
                   << suspend_count << "). This thread will invoke the method only once "
                   << "it is fully resumed.";
    }

    mirror::Object* receiver = gRegistry->Get<mirror::Object*>(object_id, &error);
    if (error != JDWP::ERR_NONE) {
      return JDWP::ERR_INVALID_OBJECT;
    }

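    // Note: the result of the following lookup is intentionally ignored; the
    // call only validates that thread_id maps to a live entry in the object
    // registry, so a stale id yields ERR_INVALID_OBJECT.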
    gRegistry->Get<mirror::Object*>(thread_id, &error);
    if (error != JDWP::ERR_NONE) {
      return JDWP::ERR_INVALID_OBJECT;
    }

    mirror::Class* c = DecodeClass(class_id, &error);
    if (c == nullptr) {
      return error;
    }

    ArtMethod* m = FromMethodId(method_id);
    if (m->IsStatic() != (receiver == nullptr)) {
      return JDWP::ERR_INVALID_METHODID;
    }
    if (m->IsStatic()) {
      if (m->GetDeclaringClass() != c) {
        return JDWP::ERR_INVALID_METHODID;
      }
    } else {
      if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
        return JDWP::ERR_INVALID_METHODID;
      }
    }

    // Check the argument list matches the method.
    uint32_t shorty_len = 0;
    const char* shorty = m->GetShorty(&shorty_len);
    if (shorty_len - 1 != arg_count) {
      return JDWP::ERR_ILLEGAL_ARGUMENT;
    }

    {
      StackHandleScope<2> hs(soa.Self());
      HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver));
      HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c));
      const DexFile::TypeList* types = m->GetParameterTypeList();
      for (size_t i = 0; i < arg_count; ++i) {
        if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) {
          return JDWP::ERR_ILLEGAL_ARGUMENT;
        }

        if (shorty[i + 1] == 'L') {
          // Did we really get an argument of an appropriate reference type?
          mirror::Class* parameter_type =
              m->GetClassFromTypeIndex(types->GetTypeItem(i).type_idx_,
                                       true /* resolve */,
                                       sizeof(void*));
          mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i], &error);
          if (error != JDWP::ERR_NONE) {
            return JDWP::ERR_INVALID_OBJECT;
          }
          if (argument != nullptr && !argument->InstanceOf(parameter_type)) {
            return JDWP::ERR_ILLEGAL_ARGUMENT;
          }

          // Turn the on-the-wire ObjectId into a jobject.
          jvalue& v = reinterpret_cast<jvalue&>(arg_values[i]);
          v.l = gRegistry->GetJObject(arg_values[i]);
        }
      }
    }

    // Allocates a DebugInvokeReq.
    DebugInvokeReq* req = new (std::nothrow) DebugInvokeReq(request_id, thread_id, receiver, c, m,
                                                            options, arg_values, arg_count);
    if (req == nullptr) {
      LOG(ERROR) << "Failed to allocate DebugInvokeReq";
      return JDWP::ERR_OUT_OF_MEMORY;
    }

    // Attaches the DebugInvokeReq to the target thread so it executes the method when
    // it is resumed. Once the invocation completes, the target thread will delete it before
    // suspending itself (see ThreadList::SuspendSelfForDebugger).
    targetThread->SetDebugInvokeReq(req);
  }

  // The fact that we've released the thread list lock is a bit risky: if the thread goes
  // away we're sitting high and dry. But we must release it before the
  // UndoDebuggerSuspensions call.
  if (resume_all_threads) {
    VLOG(jdwp) << "      Resuming all threads";
    thread_list->UndoDebuggerSuspensions();
  } else {
    VLOG(jdwp) << "      Resuming event thread only";
    thread_list->Resume(targetThread, true);
  }

  return JDWP::ERR_NONE;
}
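
// Overview of the debugger invoke flow implemented here: the JDWP thread
// builds a DebugInvokeReq and attaches it to the suspended target thread
// (PrepareInvokeMethod above); the target thread, once resumed, executes the
// method and builds the reply (ExecuteMethod below); it then sends the reply
// and suspends itself again (FinishInvokeMethod below, together with
// ThreadList::SuspendSelfForDebugger).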

void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
  Thread* const self = Thread::Current();
  CHECK_NE(self, GetDebugThread()) << "This must be called by the event thread";

  ScopedObjectAccess soa(self);

  // We can be called while an exception is pending. We need
  // to preserve that across the method invocation.
  StackHandleScope<1> hs(soa.Self());
  Handle<mirror::Throwable> old_exception = hs.NewHandle(soa.Self()->GetException());
  soa.Self()->ClearException();

  // Execute the method, then send the reply to the debugger.
  ExecuteMethodWithoutPendingException(soa, pReq);

  // If an exception was pending before the invoke, restore it now.
  if (old_exception.Get() != nullptr) {
    soa.Self()->SetException(old_exception.Get());
  }
}

// Helper function: write a variable-width value into the output buffer.
static void WriteValue(JDWP::ExpandBuf* pReply, int width, uint64_t value) {
  switch (width) {
    case 1:
      expandBufAdd1(pReply, value);
      break;
    case 2:
      expandBufAdd2BE(pReply, value);
      break;
    case 4:
      expandBufAdd4BE(pReply, value);
      break;
    case 8:
      expandBufAdd8BE(pReply, value);
      break;
    default:
      LOG(FATAL) << "Unexpected value width: " << width;
      UNREACHABLE();
  }
}
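
// For example, a 'J' (long) result has width 8 and goes through
// expandBufAdd8BE, while a 'Z' (boolean) result has width 1 and uses
// expandBufAdd1; BuildInvokeReply below picks the width via GetTagWidth.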

void Dbg::ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInvokeReq* pReq) {
  soa.Self()->AssertNoPendingException();

  // Translate the method through the vtable, unless the debugger wants to suppress it.
  ArtMethod* m = pReq->method;
  size_t image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
  if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver.Read() != nullptr) {
    ArtMethod* actual_method =
        pReq->klass.Read()->FindVirtualMethodForVirtualOrInterface(m, image_pointer_size);
    if (actual_method != m) {
      VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m)
                 << " to " << PrettyMethod(actual_method);
      m = actual_method;
    }
  }
  VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m)
             << " receiver=" << pReq->receiver.Read()
             << " arg_count=" << pReq->arg_count;
  CHECK(m != nullptr);

  static_assert(sizeof(jvalue) == sizeof(uint64_t), "jvalue and uint64_t have different sizes.");

  // Invoke the method.
  ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(pReq->receiver.Read()));
  JValue result = InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(m),
                                    reinterpret_cast<jvalue*>(pReq->arg_values.get()));

  // Prepare JDWP ids for the reply.
  JDWP::JdwpTag result_tag = BasicTagFromDescriptor(m->GetShorty());
  const bool is_object_result = (result_tag == JDWP::JT_OBJECT);
  StackHandleScope<2> hs(soa.Self());
  Handle<mirror::Object> object_result = hs.NewHandle(is_object_result ? result.GetL() : nullptr);
  Handle<mirror::Throwable> exception = hs.NewHandle(soa.Self()->GetException());
  soa.Self()->ClearException();

  if (!IsDebuggerActive()) {
    // The debugger detached: we must not re-suspend threads. We also don't need to fill the reply
    // because it won't be sent either.
    return;
  }

  JDWP::ObjectId exceptionObjectId = gRegistry->Add(exception);
  uint64_t result_value = 0;
  if (exceptionObjectId != 0) {
    VLOG(jdwp) << "  JDWP invocation returning with exception=" << exception.Get()
               << " " << exception->Dump();
    result_value = 0;
  } else if (is_object_result) {
    /* if no exception was thrown, examine object result more closely */
    JDWP::JdwpTag new_tag = TagFromObject(soa, object_result.Get());
    if (new_tag != result_tag) {
      VLOG(jdwp) << "  JDWP promoted result from " << result_tag << " to " << new_tag;
      result_tag = new_tag;
    }

    // Register the object in the registry and reference it by its ObjectId. This ensures
    // GC safety and prevents accessing a stale reference if the object is moved.
    result_value = gRegistry->Add(object_result.Get());
  } else {
    // Primitive result.
    DCHECK(IsPrimitiveTag(result_tag));
    result_value = result.GetJ();
  }
  const bool is_constructor = m->IsConstructor() && !m->IsStatic();
  if (is_constructor) {
    // If we invoked a constructor (which actually returns void), return the receiver,
    // unless we threw, in which case we return null.
    DCHECK_EQ(JDWP::JT_VOID, result_tag);
    if (exceptionObjectId == 0) {
      // TODO we could keep the receiver ObjectId in the DebugInvokeReq to avoid looking into the
      // object registry.
      result_value = GetObjectRegistry()->Add(pReq->receiver.Read());
      result_tag = TagFromObject(soa, pReq->receiver.Read());
    } else {
      result_value = 0;
      result_tag = JDWP::JT_OBJECT;
    }
  }

  // Suspend other threads if the invoke is not single-threaded.
  if ((pReq->options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
    ScopedThreadSuspension sts(soa.Self(), kWaitingForDebuggerSuspension);
    VLOG(jdwp) << "      Suspending all threads";
    Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
  }

  VLOG(jdwp) << "  --> returned " << result_tag
             << StringPrintf(" %#" PRIx64 " (except=%#" PRIx64 ")", result_value,
                             exceptionObjectId);

  // Show detailed debug output.
  if (result_tag == JDWP::JT_STRING && exceptionObjectId == 0) {
    if (result_value != 0) {
      if (VLOG_IS_ON(jdwp)) {
        std::string result_string;
        JDWP::JdwpError error = Dbg::StringToUtf8(result_value, &result_string);
        CHECK_EQ(error, JDWP::ERR_NONE);
        VLOG(jdwp) << "      string '" << result_string << "'";
      }
    } else {
      VLOG(jdwp) << "      string (null)";
    }
  }

  // Attach the reply to DebugInvokeReq so it can be sent to the debugger when the event thread
  // is ready to suspend.
  BuildInvokeReply(pReq->reply, pReq->request_id, result_tag, result_value, exceptionObjectId);
}

void Dbg::BuildInvokeReply(JDWP::ExpandBuf* pReply, uint32_t request_id, JDWP::JdwpTag result_tag,
                           uint64_t result_value, JDWP::ObjectId exception) {
  // Make room for the JDWP header since we do not know the size of the reply yet.
  JDWP::expandBufAddSpace(pReply, kJDWPHeaderLen);

  size_t width = GetTagWidth(result_tag);
  JDWP::expandBufAdd1(pReply, result_tag);
  if (width != 0) {
    WriteValue(pReply, width, result_value);
  }
  JDWP::expandBufAdd1(pReply, JDWP::JT_OBJECT);
  JDWP::expandBufAddObjectId(pReply, exception);

  // Now we know the size, we can complete the JDWP header.
  uint8_t* buf = expandBufGetBuffer(pReply);
  JDWP::Set4BE(buf + kJDWPHeaderSizeOffset, expandBufGetLength(pReply));
  JDWP::Set4BE(buf + kJDWPHeaderIdOffset, request_id);
  JDWP::Set1(buf + kJDWPHeaderFlagsOffset, kJDWPFlagReply);  // flags
  JDWP::Set2BE(buf + kJDWPHeaderErrorCodeOffset, JDWP::ERR_NONE);
}
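
// The reply packet assembled above therefore has the layout:
//   [u4] total length  [u4] request id  [u1] flags (reply)  [u2] error code
//   [u1] result tag  [0..8 bytes] result value  [u1] JT_OBJECT  [u8] exception id
// where the result value is omitted for void (GetTagWidth returns 0) and
// object ids are 8 bytes in this runtime.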

void Dbg::FinishInvokeMethod(DebugInvokeReq* pReq) {
  CHECK_NE(Thread::Current(), GetDebugThread()) << "This must be called by the event thread";

  JDWP::ExpandBuf* const pReply = pReq->reply;
  CHECK(pReply != nullptr) << "No reply attached to DebugInvokeReq";

  // We need to prevent other threads (including JDWP thread) from interacting with the debugger
  // while we send the reply but are not yet suspended. The JDWP token will be released just before
  // we suspend ourself again (see ThreadList::SuspendSelfForDebugger).
  gJdwpState->AcquireJdwpTokenForEvent(pReq->thread_id);

  // Send the reply unless the debugger detached before the completion of the method.
  if (IsDebuggerActive()) {
    const size_t replyDataLength = expandBufGetLength(pReply) - kJDWPHeaderLen;
    VLOG(jdwp) << StringPrintf("REPLY INVOKE id=0x%06x (length=%zu)",
                               pReq->request_id, replyDataLength);

    gJdwpState->SendRequest(pReply);
  } else {
    VLOG(jdwp) << "Not sending invoke reply because debugger detached";
  }
}

/*
 * "request" contains a full JDWP packet, possibly with multiple chunks.  We
 * need to process each, accumulate the replies, and ship the whole thing
 * back.
 *
 * Returns "true" if we have a reply.  The reply buffer is newly allocated,
 * and includes the chunk type/length, followed by the data.
 *
 * OLD-TODO: we currently assume that the request and reply include a single
 * chunk.  If this becomes inconvenient we will need to adapt.
 */
bool Dbg::DdmHandlePacket(JDWP::Request* request, uint8_t** pReplyBuf, int* pReplyLen) {
  Thread* self = Thread::Current();
  JNIEnv* env = self->GetJniEnv();

  uint32_t type = request->ReadUnsigned32("type");
  uint32_t length = request->ReadUnsigned32("length");

  // Create a byte[] corresponding to 'request'.
  size_t request_length = request->size();
  ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(request_length));
  if (dataArray.get() == nullptr) {
    LOG(WARNING) << "byte[] allocation failed: " << request_length;
    env->ExceptionClear();
    return false;
  }
  env->SetByteArrayRegion(dataArray.get(), 0, request_length,
                          reinterpret_cast<const jbyte*>(request->data()));
  request->Skip(request_length);

  // Run through and find all chunks.  [Currently just find the first.]
  ScopedByteArrayRO contents(env, dataArray.get());
  if (length != request_length) {
    LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%zu)", length, request_length);
    return false;
  }

  // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)".
  ScopedLocalRef<jobject> chunk(env, env->CallStaticObjectMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
                                                                 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch,
                                                                 type, dataArray.get(), 0, length));
  if (env->ExceptionCheck()) {
    LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type);
    env->ExceptionDescribe();
    env->ExceptionClear();
    return false;
  }

  if (chunk.get() == nullptr) {
    return false;
  }

  /*
   * Pull the pieces out of the chunk.  We copy the results into a
   * newly-allocated buffer that the caller can free.  We don't want to
   * continue using the Chunk object because nothing has a reference to it.
   *
   * We could avoid this by returning type/data/offset/length and having
   * the caller be aware of the object lifetime issues, but that
   * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work
   * if we have responses for multiple chunks.
   *
   * So we're pretty much stuck with copying data around multiple times.
   */
  ScopedLocalRef<jbyteArray> replyData(env, reinterpret_cast<jbyteArray>(env->GetObjectField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data)));
  jint offset = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset);
  length = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length);
  type = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type);

  VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", type, replyData.get(), offset, length);
  if (length == 0 || replyData.get() == nullptr) {
    return false;
  }

  const int kChunkHdrLen = 8;
  // Use the non-throwing form so the null check below is meaningful; a plain
  // new[] would throw std::bad_alloc instead of returning nullptr.
  uint8_t* reply = new (std::nothrow) uint8_t[length + kChunkHdrLen];
  if (reply == nullptr) {
    LOG(WARNING) << "reply buffer allocation failed: " << (length + kChunkHdrLen);
    return false;
  }
  JDWP::Set4BE(reply + 0, type);
  JDWP::Set4BE(reply + 4, length);
  env->GetByteArrayRegion(replyData.get(), offset, length, reinterpret_cast<jbyte*>(reply + kChunkHdrLen));

  *pReplyBuf = reply;
  *pReplyLen = length + kChunkHdrLen;

  VLOG(jdwp) << StringPrintf("dvmHandleDdm returning type=%.4s %p len=%d", reinterpret_cast<char*>(reply), reply, length);
  return true;
}
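
// A DDM chunk, both in the incoming request and in the reply built above, is
// simply [u4] type (a FourCC such as 'THCR') followed by [u4] length and then
// the payload; that is why kChunkHdrLen is 8 and the payload is copied to
// reply + 8.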

void Dbg::DdmBroadcast(bool connect) {
  VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";

  Thread* self = Thread::Current();
  if (self->GetState() != kRunnable) {
    LOG(ERROR) << "DDM broadcast in thread state " << self->GetState();
    /* try anyway? */
  }

  JNIEnv* env = self->GetJniEnv();
  jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/;
  env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
                            WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast,
                            event);
  if (env->ExceptionCheck()) {
    LOG(ERROR) << "DdmServer.broadcast " << event << " failed";
    env->ExceptionDescribe();
    env->ExceptionClear();
  }
}

void Dbg::DdmConnected() {
  Dbg::DdmBroadcast(true);
}

void Dbg::DdmDisconnected() {
  Dbg::DdmBroadcast(false);
  gDdmThreadNotification = false;
}

/*
 * Send a notification when a thread starts, stops, or changes its name.
 *
 * Because we broadcast the full set of threads when the notifications are
 * first enabled, it's possible for "thread" to be actively executing.
 */
void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
  if (!gDdmThreadNotification) {
    return;
  }

  if (type == CHUNK_TYPE("THDE")) {
    uint8_t buf[4];
    JDWP::Set4BE(&buf[0], t->GetThreadId());
    Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf);
  } else {
    CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
    ScopedObjectAccessUnchecked soa(Thread::Current());
    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa)));
    size_t char_count = (name.Get() != nullptr) ? name->GetLength() : 0;
    const jchar* chars = (name.Get() != nullptr) ? name->GetValue() : nullptr;

    std::vector<uint8_t> bytes;
    JDWP::Append4BE(bytes, t->GetThreadId());
    JDWP::AppendUtf16BE(bytes, chars, char_count);
    CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2);
    Dbg::DdmSendChunk(type, bytes);
  }
}
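
// For illustration: the THCR/THNM payload for thread id 5 named "main" is
// [u4] thread id (5), then a [u4] char count (4), then the name in big-endian
// UTF-16 (8 bytes), 16 bytes in total; this matches the CHECK_EQ on
// bytes.size() above (assuming AppendUtf16BE writes the u4 length prefix, as
// that check implies).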

void Dbg::DdmSetThreadNotification(bool enable) {
  // Enable/disable thread notifications.
  gDdmThreadNotification = enable;
  if (enable) {
    // Suspend the VM then post thread start notifications for all threads. Threads attaching will
    // see a suspension in progress and block until that ends. They then post their own start
    // notification.
    SuspendVM();
    std::list<Thread*> threads;
    Thread* self = Thread::Current();
    {
      MutexLock mu(self, *Locks::thread_list_lock_);
      threads = Runtime::Current()->GetThreadList()->GetList();
    }
    {
      ScopedObjectAccess soa(self);
      for (Thread* thread : threads) {
        Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
      }
    }
    ResumeVM();
  }
}

void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
  if (IsDebuggerActive()) {
    gJdwpState->PostThreadChange(t, type == CHUNK_TYPE("THCR"));
  }
  Dbg::DdmSendThreadNotification(t, type);
}

void Dbg::PostThreadStart(Thread* t) {
  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
}

void Dbg::PostThreadDeath(Thread* t) {
  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
}

void Dbg::DdmSendChunk(uint32_t type, size_t byte_count, const uint8_t* buf) {
  CHECK(buf != nullptr);
  iovec vec[1];
  vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(buf));
  vec[0].iov_len = byte_count;
  Dbg::DdmSendChunkV(type, vec, 1);
}

void Dbg::DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) {
  DdmSendChunk(type, bytes.size(), &bytes[0]);
}

void Dbg::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) {
  if (gJdwpState == nullptr) {
    VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
  } else {
    gJdwpState->DdmSendChunkV(type, iov, iov_count);
  }
}

JDWP::JdwpState* Dbg::GetJdwpState() {
  return gJdwpState;
}

int Dbg::DdmHandleHpifChunk(HpifWhen when) {
  if (when == HPIF_WHEN_NOW) {
    DdmSendHeapInfo(when);
    return true;
  }

  if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
    LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
    return false;
  }

  gDdmHpifWhen = when;
  return true;
}

bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
  if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) {
    LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when);
    return false;
  }

  if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) {
    LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what);
    return false;
  }

  if (native) {
    gDdmNhsgWhen = when;
    gDdmNhsgWhat = what;
  } else {
    gDdmHpsgWhen = when;
    gDdmHpsgWhat = what;
  }
  return true;
}

void Dbg::DdmSendHeapInfo(HpifWhen reason) {
  // If there's a one-shot 'when', reset it.
  if (reason == gDdmHpifWhen) {
    if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
      gDdmHpifWhen = HPIF_WHEN_NEVER;
    }
  }

  /*
   * Chunk HPIF (client --> server)
   *
   * Heap Info. General information about the heap,
   * suitable for a summary display.
   *
   *   [u4]: number of heaps
   *
   *   For each heap:
   *     [u4]: heap ID
   *     [u8]: timestamp in ms since Unix epoch
   *     [u1]: capture reason (same as 'when' value from server)
   *     [u4]: max heap size in bytes (-Xmx)
   *     [u4]: current heap size in bytes
   *     [u4]: current number of bytes allocated
   *     [u4]: current number of objects allocated
   */
  uint8_t heap_count = 1;
  gc::Heap* heap = Runtime::Current()->GetHeap();
  std::vector<uint8_t> bytes;
  JDWP::Append4BE(bytes, heap_count);
  JDWP::Append4BE(bytes, 1);  // Heap id (bogus; we only have one heap).
  JDWP::Append8BE(bytes, MilliTime());
  JDWP::Append1BE(bytes, reason);
  JDWP::Append4BE(bytes, heap->GetMaxMemory());  // Max allowed heap size in bytes.
  JDWP::Append4BE(bytes, heap->GetTotalMemory());  // Current heap size in bytes.
  JDWP::Append4BE(bytes, heap->GetBytesAllocated());
  JDWP::Append4BE(bytes, heap->GetObjectsAllocated());
  CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
  Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes);
}

enum HpsgSolidity {
  SOLIDITY_FREE = 0,
  SOLIDITY_HARD = 1,
  SOLIDITY_SOFT = 2,
  SOLIDITY_WEAK = 3,
  SOLIDITY_PHANTOM = 4,
  SOLIDITY_FINALIZABLE = 5,
  SOLIDITY_SWEEP = 6,
};

enum HpsgKind {
  KIND_OBJECT = 0,
  KIND_CLASS_OBJECT = 1,
  KIND_ARRAY_1 = 2,
  KIND_ARRAY_2 = 3,
  KIND_ARRAY_4 = 4,
  KIND_ARRAY_8 = 5,
  KIND_UNKNOWN = 6,
  KIND_NATIVE = 7,
};

#define HPSG_PARTIAL (1<<7)
#define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
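
// For example, HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4) encodes
// ((4 & 0x7) << 3) | (1 & 0x7) == 0x21; HPSG_PARTIAL is OR'd in when a large
// chunk is split across several run-length records (see AppendChunk below).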

class HeapChunkContext {
 public:
  // Maximum chunk size.  Obtain this from the formula:
  // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
  HeapChunkContext(bool merge, bool native)
      : buf_(16384 - 16),
        type_(0),
        chunk_overhead_(0) {
    Reset();
    if (native) {
      type_ = CHUNK_TYPE("NHSG");
    } else {
      type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
    }
  }

  ~HeapChunkContext() {
    if (p_ > &buf_[0]) {
      Flush();
    }
  }

  void SetChunkOverhead(size_t chunk_overhead) {
    chunk_overhead_ = chunk_overhead;
  }

  void ResetStartOfNextChunk() {
    startOfNextMemoryChunk_ = nullptr;
  }

  void EnsureHeader(const void* chunk_ptr) {
    if (!needHeader_) {
      return;
    }

    // Start a new HPSx chunk.
    JDWP::Write4BE(&p_, 1);  // Heap id (bogus; we only have one heap).
    JDWP::Write1BE(&p_, 8);  // Size of allocation unit, in bytes.

    JDWP::Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr));  // virtual address of segment start.
    JDWP::Write4BE(&p_, 0);  // offset of this piece (relative to the virtual address).
    // [u4]: length of piece, in allocation units
    // We won't know this until we're done, so save the offset and stuff in a dummy value.
    pieceLenField_ = p_;
    JDWP::Write4BE(&p_, 0x55555555);
    needHeader_ = false;
  }

  void Flush() SHARED_REQUIRES(Locks::mutator_lock_) {
    if (pieceLenField_ == nullptr) {
      // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
      CHECK(needHeader_);
      return;
    }
    // Patch the "length of piece" field.
    CHECK_LE(&buf_[0], pieceLenField_);
    CHECK_LE(pieceLenField_, p_);
    JDWP::Set4BE(pieceLenField_, totalAllocationUnits_);

    Dbg::DdmSendChunk(type_, p_ - &buf_[0], &buf_[0]);
    Reset();
  }

  static void HeapChunkJavaCallback(void* start, void* end, size_t used_bytes, void* arg)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_,
                      Locks::mutator_lock_) {
    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkJavaCallback(start, end, used_bytes);
  }

  static void HeapChunkNativeCallback(void* start, void* end, size_t used_bytes, void* arg)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkNativeCallback(start, end, used_bytes);
  }

 private:
  enum { ALLOCATION_UNIT_SIZE = 8 };

  void Reset() {
    p_ = &buf_[0];
    ResetStartOfNextChunk();
    totalAllocationUnits_ = 0;
    needHeader_ = true;
    pieceLenField_ = nullptr;
  }

  bool IsNative() const {
    return type_ == CHUNK_TYPE("NHSG");
  }

  // Returns true if the object is not an empty chunk.
  bool ProcessRecord(void* start, size_t used_bytes) SHARED_REQUIRES(Locks::mutator_lock_) {
    // Note: heap callbacks cannot manipulate the heap they are crawling over. Care is taken
    // in the following code not to allocate memory, by ensuring buf_ is of the correct size.
    if (used_bytes == 0) {
      if (start == nullptr) {
        // Reset for start of new heap.
        startOfNextMemoryChunk_ = nullptr;
        Flush();
      }
      // Only process in-use memory so that free region information
      // also includes dlmalloc bookkeeping.
      return false;
    }
    if (startOfNextMemoryChunk_ != nullptr) {
      // Transmit any pending free memory. Native free memory over kMaxFreeLen could be due to
      // the use of mmaps, so don't report it. If the gap is not reported as free memory, flush
      // and start a new segment.
      bool flush = true;
      if (start > startOfNextMemoryChunk_) {
        const size_t kMaxFreeLen = 2 * kPageSize;
        void* free_start = startOfNextMemoryChunk_;
        void* free_end = start;
        const size_t free_len =
            reinterpret_cast<uintptr_t>(free_end) - reinterpret_cast<uintptr_t>(free_start);
        if (!IsNative() || free_len < kMaxFreeLen) {
          AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), free_start, free_len, IsNative());
          flush = false;
        }
      }
      if (flush) {
        startOfNextMemoryChunk_ = nullptr;
        Flush();
      }
    }
    return true;
  }

  void HeapChunkNativeCallback(void* start, void* /*end*/, size_t used_bytes)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (ProcessRecord(start, used_bytes)) {
      uint8_t state = ExamineNativeObject(start);
      AppendChunk(state, start, used_bytes + chunk_overhead_, true /*is_native*/);
      startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
    }
  }

  void HeapChunkJavaCallback(void* start, void* /*end*/, size_t used_bytes)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    if (ProcessRecord(start, used_bytes)) {
      // Determine the type of this chunk.
      // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
      // If it's the same, we should combine them.
      uint8_t state = ExamineJavaObject(reinterpret_cast<mirror::Object*>(start));
      AppendChunk(state, start, used_bytes + chunk_overhead_, false /*is_native*/);
      startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
    }
  }

  void AppendChunk(uint8_t state, void* ptr, size_t length, bool is_native)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    // Make sure there's enough room left in the buffer.
    // We need to use two bytes for every fractional 256 allocation units used by the chunk plus
    // 17 bytes for any header.
    const size_t needed = ((RoundUp(length / ALLOCATION_UNIT_SIZE, 256) / 256) * 2) + 17;
    size_t byte_left = &buf_.back() - p_;
    if (byte_left < needed) {
      if (is_native) {
        // Cannot trigger memory allocation while walking native heap.
        return;
      }
      Flush();
    }

    byte_left = &buf_.back() - p_;
    if (byte_left < needed) {
      LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", "
          << needed << " bytes)";
      return;
    }
    EnsureHeader(ptr);
    // Write out the chunk description.
    length /= ALLOCATION_UNIT_SIZE;   // Convert to allocation units.
    totalAllocationUnits_ += length;
    while (length > 256) {
      *p_++ = state | HPSG_PARTIAL;
      *p_++ = 255;     // length - 1
      length -= 256;
    }
    *p_++ = state;
    *p_++ = length - 1;
  }
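
  // Worked example: a 4800-byte chunk is 600 allocation units and is emitted
  // as (state | HPSG_PARTIAL, 255) twice (a run of 256 units each) followed
  // by (state, 87) for the remaining 88 units.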

  uint8_t ExamineNativeObject(const void* p) SHARED_REQUIRES(Locks::mutator_lock_) {
    return p == nullptr ? HPSG_STATE(SOLIDITY_FREE, 0) : HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
  }

  uint8_t ExamineJavaObject(mirror::Object* o)
      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    if (o == nullptr) {
      return HPSG_STATE(SOLIDITY_FREE, 0);
    }
    // It's an allocated chunk. Figure out what it is.
    gc::Heap* heap = Runtime::Current()->GetHeap();
    if (!heap->IsLiveObjectLocked(o)) {
      LOG(ERROR) << "Invalid object in managed heap: " << o;
      return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
    }
    mirror::Class* c = o->GetClass();
    if (c == nullptr) {
      // The object was probably just created but hasn't been initialized yet.
      return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
    }
    if (!heap->IsValidObjectAddress(c)) {
      LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
    }
    if (c->GetClass() == nullptr) {
      LOG(ERROR) << "Null class of class " << c << " for object " << o;
      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
    }
    if (c->IsClassClass()) {
      return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
    }
    if (c->IsArrayClass()) {
      switch (c->GetComponentSize()) {
      case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
      case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
      case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
      case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
      }
    }
    return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
  }

  std::vector<uint8_t> buf_;
  uint8_t* p_;
  uint8_t* pieceLenField_;
  void* startOfNextMemoryChunk_;
  size_t totalAllocationUnits_;
  uint32_t type_;
  bool needHeader_;
  size_t chunk_overhead_;

  DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
};

static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
    SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
  const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
  HeapChunkContext::HeapChunkJavaCallback(
      obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
}

void Dbg::DdmSendHeapSegments(bool native) {
  Dbg::HpsgWhen when = native ? gDdmNhsgWhen : gDdmHpsgWhen;
  Dbg::HpsgWhat what = native ? gDdmNhsgWhat : gDdmHpsgWhat;
  if (when == HPSG_WHEN_NEVER) {
    return;
  }
  // Figure out what kind of chunks we'll be sending.
  CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS)
      << static_cast<int>(what);

  // First, send a heap start chunk.
  uint8_t heap_id[4];
  JDWP::Set4BE(&heap_id[0], 1);  // Heap id (bogus; we only have one heap).
  Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertSharedHeld(self);

  // Send a series of heap segment chunks.
  HeapChunkContext context(what == HPSG_WHAT_MERGED_OBJECTS, native);
  if (native) {
    UNIMPLEMENTED(WARNING) << "Native heap inspection is not supported";
  } else {
    gc::Heap* heap = Runtime::Current()->GetHeap();
    for (const auto& space : heap->GetContinuousSpaces()) {
      if (space->IsDlMallocSpace()) {
        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
        // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
        // allocation then the first sizeof(size_t) may belong to it.
        context.SetChunkOverhead(sizeof(size_t));
        space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
      } else if (space->IsRosAllocSpace()) {
        context.SetChunkOverhead(0);
        // Need to acquire the mutator lock before the heap bitmap lock with exclusive access since
        // RosAlloc's internal logic doesn't know to release and reacquire the heap bitmap lock.
        ScopedThreadSuspension sts(self, kSuspended);
        ScopedSuspendAll ssa(__FUNCTION__);
        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
        space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
      } else if (space->IsBumpPointerSpace()) {
        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
        context.SetChunkOverhead(0);
        space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
        HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
      } else if (space->IsRegionSpace()) {
        heap->IncrementDisableMovingGC(self);
        {
          ScopedThreadSuspension sts(self, kSuspended);
          ScopedSuspendAll ssa(__FUNCTION__);
          ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
          context.SetChunkOverhead(0);
          space->AsRegionSpace()->Walk(BumpPointerSpaceCallback, &context);
          HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
        }
        heap->DecrementDisableMovingGC(self);
      } else {
        UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
      }
      context.ResetStartOfNextChunk();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Walk the large objects, these are not in the AllocSpace.
    context.SetChunkOverhead(0);
    heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
  }

  // Finally, send a heap end chunk.
  Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
}

void Dbg::SetAllocTrackingEnabled(bool enable) {
  gc::AllocRecordObjectMap::SetAllocTrackingEnabled(enable);
}

void Dbg::DumpRecentAllocations() {
  ScopedObjectAccess soa(Thread::Current());
  MutexLock mu(soa.Self(), *Locks::alloc_tracker_lock_);
  if (!Runtime::Current()->GetHeap()->IsAllocTrackingEnabled()) {
    LOG(INFO) << "Not recording tracked allocations";
    return;
  }
  gc::AllocRecordObjectMap* records = Runtime::Current()->GetHeap()->GetAllocationRecords();
  CHECK(records != nullptr);

  const uint16_t capped_count = CappedAllocRecordCount(records->GetRecentAllocationSize());
  uint16_t count = capped_count;

  LOG(INFO) << "Tracked allocations (count=" << count << ")";
  for (auto it = records->RBegin(), end = records->REnd();
      count > 0 && it != end; count--, it++) {
    const gc::AllocRecord* record = &it->second;

    LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->GetTid(), record->ByteCount())
              << PrettyClass(record->GetClass());

    for (size_t stack_frame = 0, depth = record->GetDepth(); stack_frame < depth; ++stack_frame) {
      const gc::AllocRecordStackTraceElement& stack_element = record->StackElement(stack_frame);
      ArtMethod* m = stack_element.GetMethod();
      LOG(INFO) << "    " << PrettyMethod(m) << " line " << stack_element.ComputeLineNumber();
    }

    // Pause periodically to help logcat catch up.
    if ((count % 5) == 0) {
      usleep(40000);
    }
  }
}

class StringTable {
 public:
  StringTable() {
  }

  void Add(const std::string& str) {
    table_.insert(str);
  }

  void Add(const char* str) {
    table_.insert(str);
  }

  size_t IndexOf(const char* s) const {
    auto it = table_.find(s);
    if (it == table_.end()) {
      LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
    }
    return std::distance(table_.begin(), it);
  }

  size_t Size() const {
    return table_.size();
  }

  void WriteTo(std::vector<uint8_t>& bytes) const {
    for (const std::string& str : table_) {
      const char* s = str.c_str();
      size_t s_len = CountModifiedUtf8Chars(s);
      std::unique_ptr<uint16_t[]> s_utf16(new uint16_t[s_len]);
      ConvertModifiedUtf8ToUtf16(s_utf16.get(), s);
      JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len);
    }
  }

 private:
  std::set<std::string> table_;
  DISALLOW_COPY_AND_ASSIGN(StringTable);
};
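
// Note: IndexOf returns a string's position within the sorted std::set, and
// WriteTo emits the strings in that same sorted order, so the 16-bit indexes
// written by GetRecentAllocations below line up with the string tables
// appended at the end of the message.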

static const char* GetMethodSourceFile(ArtMethod* method)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  DCHECK(method != nullptr);
  const char* source_file = method->GetDeclaringClassSourceFile();
  return (source_file != nullptr) ? source_file : "";
}

/*
 * The data we send to DDMS contains everything we have recorded.
 *
 * Message header (all values big-endian):
 * (1b) message header len (to allow future expansion); includes itself
 * (1b) entry header len
 * (1b) stack frame len
 * (2b) number of entries
 * (4b) offset to string table from start of message
 * (2b) number of class name strings
 * (2b) number of method name strings
 * (2b) number of source file name strings
 * For each entry:
 *   (4b) total allocation size
 *   (2b) thread id
 *   (2b) allocated object's class name index
 *   (1b) stack depth
 *   For each stack frame:
 *     (2b) method's class name
 *     (2b) method name
 *     (2b) method source file
 *     (2b) line number, clipped to 32767; -2 if native; -1 if no source
 * (xb) class name strings
 * (xb) method name strings
 * (xb) source file strings
 *
 * As with other DDM traffic, strings are sent as a 4-byte length
 * followed by UTF-16 data.
 *
 * We send up 16-bit unsigned indexes into string tables.  In theory there
 * can be (kMaxAllocRecordStackDepth * alloc_record_max_) unique strings in
 * each table, but in practice there should be far fewer.
 *
 * The chief reason for using a string table here is to keep the size of
 * the DDMS message to a minimum.  This is partly to make the protocol
 * efficient, but also because we have to form the whole thing up all at
 * once in a memory buffer.
 *
 * We use separate string tables for class names, method names, and source
 * files to keep the indexes small.  There will generally be no overlap
 * between the contents of these tables.
 */
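// The header length constants used below follow directly from this layout:
// message header 1+1+1+2+4+2+2+2 = 15 bytes, entry header 4+2+2+1 = 9 bytes,
// and stack frame 2+2+2+2 = 8 bytes.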
jbyteArray Dbg::GetRecentAllocations() {
  if ((false)) {  // Flip to true to also dump the records to the log.
    DumpRecentAllocations();
  }

  Thread* self = Thread::Current();
  std::vector<uint8_t> bytes;
  {
    MutexLock mu(self, *Locks::alloc_tracker_lock_);
    gc::AllocRecordObjectMap* records = Runtime::Current()->GetHeap()->GetAllocationRecords();
    // In case this method is called when the allocation tracker is disabled,
    // we should still send some data back.
    gc::AllocRecordObjectMap dummy;
    if (records == nullptr) {
      CHECK(!Runtime::Current()->GetHeap()->IsAllocTrackingEnabled());
      records = &dummy;
    }
    // We don't need to wait on the condition variable records->new_record_condition_, because this
    // function only reads the class objects, which are already marked so it doesn't change their
    // reachability.

    //
    // Part 1: generate string tables.
    //
    StringTable class_names;
    StringTable method_names;
    StringTable filenames;

    const uint16_t capped_count = CappedAllocRecordCount(records->GetRecentAllocationSize());
    uint16_t count = capped_count;
    for (auto it = records->RBegin(), end = records->REnd();
         count > 0 && it != end; count--, it++) {
      const gc::AllocRecord* record = &it->second;
      std::string temp;
      class_names.Add(record->GetClassDescriptor(&temp));
      for (size_t i = 0, depth = record->GetDepth(); i < depth; i++) {
        ArtMethod* m = record->StackElement(i).GetMethod();
        class_names.Add(m->GetDeclaringClassDescriptor());
        method_names.Add(m->GetName());
        filenames.Add(GetMethodSourceFile(m));
      }
    }

    LOG(INFO) << "recent allocation records: " << capped_count;
    LOG(INFO) << "allocation records all objects: " << records->Size();

    //
    // Part 2: Generate the output and store it in the buffer.
    //

    // (1b) message header len (to allow future expansion); includes itself
    // (1b) entry header len
    // (1b) stack frame len
    const int kMessageHeaderLen = 15;
    const int kEntryHeaderLen = 9;
    const int kStackFrameLen = 8;
    JDWP::Append1BE(bytes, kMessageHeaderLen);
    JDWP::Append1BE(bytes, kEntryHeaderLen);
    JDWP::Append1BE(bytes, kStackFrameLen);

    // (2b) number of entries
    // (4b) offset to string table from start of message
    // (2b) number of class name strings
    // (2b) number of method name strings
    // (2b) number of source file name strings
    JDWP::Append2BE(bytes, capped_count);
    size_t string_table_offset = bytes.size();
    JDWP::Append4BE(bytes, 0);  // We'll patch this later...
    JDWP::Append2BE(bytes, class_names.Size());
    JDWP::Append2BE(bytes, method_names.Size());
    JDWP::Append2BE(bytes, filenames.Size());

    std::string temp;
    count = capped_count;
    // The last "count" allocation records in "records" are the most recent "count"
    // allocations. Reverse-iterate to get them; the most recent allocation is sent first.
    for (auto it = records->RBegin(), end = records->REnd();
         count > 0 && it != end; count--, it++) {
      // For each entry:
      // (4b) total allocation size
      // (2b) thread id
      // (2b) allocated object's class name index
      // (1b) stack depth
      const gc::AllocRecord* record = &it->second;
      size_t stack_depth = record->GetDepth();
      size_t allocated_object_class_name_index =
          class_names.IndexOf(record->GetClassDescriptor(&temp));
      JDWP::Append4BE(bytes, record->ByteCount());
      JDWP::Append2BE(bytes, static_cast<uint16_t>(record->GetTid()));
      JDWP::Append2BE(bytes, allocated_object_class_name_index);
      JDWP::Append1BE(bytes, stack_depth);

      for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) {
        // For each stack frame:
        // (2b) method's class name
        // (2b) method name
        // (2b) method source file
        // (2b) line number, clipped to 32767; -2 if native; -1 if no source
        ArtMethod* m = record->StackElement(stack_frame).GetMethod();
        size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor());
        size_t method_name_index = method_names.IndexOf(m->GetName());
        size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m));
        JDWP::Append2BE(bytes, class_name_index);
        JDWP::Append2BE(bytes, method_name_index);
        JDWP::Append2BE(bytes, file_name_index);
        JDWP::Append2BE(bytes, record->StackElement(stack_frame).ComputeLineNumber());
      }
    }

    // (xb) class name strings
    // (xb) method name strings
    // (xb) source file strings
    JDWP::Set4BE(&bytes[string_table_offset], bytes.size());
    class_names.WriteTo(bytes);
    method_names.WriteTo(bytes);
    filenames.WriteTo(bytes);
  }
  JNIEnv* env = self->GetJniEnv();
  jbyteArray result = env->NewByteArray(bytes.size());
  if (result != nullptr) {
    env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
  }
  return result;
}

ArtMethod* DeoptimizationRequest::Method() const {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  return soa.DecodeMethod(method_);
}

void DeoptimizationRequest::SetMethod(ArtMethod* m) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  method_ = soa.EncodeMethod(m);
}

void Dbg::VisitRoots(RootVisitor* visitor) {
  // Visit breakpoint roots, used to prevent unloading of methods with breakpoints.
  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
  BufferedRootVisitor<128> root_visitor(visitor, RootInfo(kRootVMInternal));
  for (Breakpoint& breakpoint : gBreakpoints) {
    breakpoint.Method()->VisitRoots(root_visitor, sizeof(void*));
  }
}

}  // namespace art