/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INSTRUMENTATION_H_
#define ART_RUNTIME_INSTRUMENTATION_H_

#include <stdint.h>
#include <list>
#include <memory>
#include <unordered_set>

#include "arch/instruction_set.h"
#include "base/enums.h"
#include "base/locks.h"
#include "base/macros.h"
#include "base/safe_map.h"
#include "gc_root.h"

namespace art {
namespace mirror {
class Class;
class Object;
class Throwable;
}  // namespace mirror
class ArtField;
class ArtMethod;
template <typename T> class Handle;
template <typename T> class MutableHandle;
union JValue;
class SHARED_LOCKABLE ReaderWriterMutex;
class ShadowFrame;
class Thread;
enum class DeoptimizationMethodType;

namespace instrumentation {

// Interpreter handler tables.
enum InterpreterHandlerTable {
  kMainHandlerTable = 0,          // Main handler table: no suspend check, no instrumentation.
  kAlternativeHandlerTable = 1,   // Alternative handler table: suspend check and/or instrumentation
                                  // enabled.
  kNumHandlerTables
};

// Do we want to deoptimize for method entry and exit listeners or just try to intercept
// invocations? Deoptimization forces all code to run in the interpreter and considerably hurts the
// application's performance.
static constexpr bool kDeoptimizeForAccurateMethodEntryExitListeners = true;

// Instrumentation event listener API. Registered listeners will get the appropriate call-back for
// the events they are listening for. The call-backs supply the thread, method and dex_pc the event
// occurred upon. The thread may or may not be Thread::Current().
struct InstrumentationListener {
  InstrumentationListener() {}
  virtual ~InstrumentationListener() {}

  // Call-back for when a method is entered.
  virtual void MethodEntered(Thread* thread,
                             Handle<mirror::Object> this_object,
                             ArtMethod* method,
                             uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when a method is exited. The return value is supplied as a handle, so this
  // overload is the one to use by listeners that need to go through a suspend point.
  virtual void MethodExited(Thread* thread,
                            Handle<mirror::Object> this_object,
                            ArtMethod* method,
                            uint32_t dex_pc,
                            Handle<mirror::Object> return_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Call-back for when a method is exited. The implementor should either handler-ize the return
  // value (if appropriate) or use the alternate MethodExited callback instead if they need to
  // go through a suspend point.
  virtual void MethodExited(Thread* thread,
                            Handle<mirror::Object> this_object,
                            ArtMethod* method,
                            uint32_t dex_pc,
                            const JValue& return_value)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when a method is popped due to an exception throw. A method will either cause a
  // MethodExited call-back or a MethodUnwind call-back when its activation is removed.
  virtual void MethodUnwind(Thread* thread,
                            Handle<mirror::Object> this_object,
                            ArtMethod* method,
                            uint32_t dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when the dex pc moves in a method.
  virtual void DexPcMoved(Thread* thread,
                          Handle<mirror::Object> this_object,
                          ArtMethod* method,
                          uint32_t new_dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when we read from a field.
  virtual void FieldRead(Thread* thread,
                         Handle<mirror::Object> this_object,
                         ArtMethod* method,
                         uint32_t dex_pc,
                         ArtField* field)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when we write into a field. The new value is supplied as a handle, so this
  // overload is the one to use by listeners that need to go through a suspend point.
  virtual void FieldWritten(Thread* thread,
                            Handle<mirror::Object> this_object,
                            ArtMethod* method,
                            uint32_t dex_pc,
                            ArtField* field,
                            Handle<mirror::Object> field_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Call-back for when we write into a field.
  virtual void FieldWritten(Thread* thread,
                            Handle<mirror::Object> this_object,
                            ArtMethod* method,
                            uint32_t dex_pc,
                            ArtField* field,
                            const JValue& field_value)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back when an exception is thrown.
  virtual void ExceptionThrown(Thread* thread,
                               Handle<mirror::Throwable> exception_object)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back when an exception is caught/handled by Java code.
  virtual void ExceptionHandled(Thread* thread, Handle<mirror::Throwable> exception_object)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when we execute a branch.
  virtual void Branch(Thread* thread,
                      ArtMethod* method,
                      uint32_t dex_pc,
                      int32_t dex_pc_offset)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back when a shadow_frame with the needs_notify_pop_ boolean set is popped off the stack by
  // either a return or an exception. Normally instrumentation listeners should ensure that there
  // are shadow frames by deoptimizing stacks.
  virtual void WatchedFramePop(Thread* thread ATTRIBUTE_UNUSED,
                               const ShadowFrame& frame ATTRIBUTE_UNUSED)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
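
// An illustrative sketch (not part of the runtime): a minimal listener overrides the pure-virtual
// call-backs it cares about and leaves the rest as empty bodies. The class name and counter below
// are hypothetical.
//
//   class CountingListener final : public InstrumentationListener {
//    public:
//     void MethodEntered(Thread* thread,
//                        Handle<mirror::Object> this_object,
//                        ArtMethod* method,
//                        uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) override {
//       entered_.fetch_add(1u, std::memory_order_relaxed);
//     }
//     // ... the remaining pure-virtual call-backs overridden as no-ops ...
//    private:
//     std::atomic<size_t> entered_{0};
//   };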

class Instrumentation;
// A helper to send instrumentation events while popping the stack in a safe way.
class InstrumentationStackPopper {
 public:
  explicit InstrumentationStackPopper(Thread* self);
  ~InstrumentationStackPopper() REQUIRES_SHARED(Locks::mutator_lock_);

  // Increases the number of frames being popped to 'desired_pops'. Returns true if the frames
  // were popped without any exceptions, false otherwise. The exception that caused the pop is
  // 'exception'.
  bool PopFramesTo(uint32_t desired_pops, /*in-out*/MutableHandle<mirror::Throwable>& exception)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  Thread* self_;
  Instrumentation* instrumentation_;
  uint32_t frames_to_remove_;
};
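
// Illustrative use (a sketch; 'self', 'frames_to_pop' and the 'exception' MutableHandle are
// assumed to come from the surrounding unwinding code):
//
//   InstrumentationStackPopper popper(self);
//   if (!popper.PopFramesTo(frames_to_pop, exception)) {
//     // A listener threw during the pop; 'exception' now refers to the replacement throwable.
//   }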

// Instrumentation is a catch-all for when extra information is required from the runtime. The
// typical use for instrumentation is for profiling and debugging. Instrumentation may add stubs
// to method entry and exit; it may also force execution to be switched to the interpreter and
// trigger deoptimization.
class Instrumentation {
 public:
  enum InstrumentationEvent {
    kMethodEntered = 0x1,
    kMethodExited = 0x2,
    kMethodUnwind = 0x4,
    kDexPcMoved = 0x8,
    kFieldRead = 0x10,
    kFieldWritten = 0x20,
    kExceptionThrown = 0x40,
    kBranch = 0x80,
    kWatchedFramePop = 0x200,
    kExceptionHandled = 0x400,
  };

  enum class InstrumentationLevel {
    kInstrumentNothing,                   // execute without instrumentation
    kInstrumentWithInstrumentationStubs,  // execute with instrumentation entry/exit stubs
    kInstrumentWithInterpreter            // execute with interpreter
  };

  Instrumentation();

  // Add a listener to be notified of the masked-together set of instrumentation events. This
  // suspends the runtime to install stubs. You are expected to hold the mutator lock as a proxy
  // for saying you should have suspended all threads (installing stubs while threads are running
  // will break).
  void AddListener(InstrumentationListener* listener, uint32_t events)
      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);

  // Removes a listener, possibly removing instrumentation stubs.
  void RemoveListener(InstrumentationListener* listener, uint32_t events)
      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);
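
  // Illustrative registration (a sketch; assumes all threads have been suspended, e.g. via
  // ScopedSuspendAll, so that holding the mutator lock exclusively is legitimate):
  //
  //   Instrumentation* instr = Runtime::Current()->GetInstrumentation();
  //   uint32_t mask = Instrumentation::kMethodEntered | Instrumentation::kMethodExited;
  //   instr->AddListener(&listener, mask);
  //   ...
  //   instr->RemoveListener(&listener, mask);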

  // Deoptimization.
  void EnableDeoptimization()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!GetDeoptimizedMethodsLock());
  // Calls UndeoptimizeEverything which may visit class linker classes through ConfigureStubs.
  void DisableDeoptimization(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!GetDeoptimizedMethodsLock());

  bool AreAllMethodsDeoptimized() const {
    return interpreter_stubs_installed_;
  }
  bool ShouldNotifyMethodEnterExitEvents() const REQUIRES_SHARED(Locks::mutator_lock_);

  bool CanDeoptimize() {
    return deoptimization_enabled_;
  }

  // Executes everything with interpreter.
  void DeoptimizeEverything(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_,
               !Locks::classlinker_classes_lock_,
               !GetDeoptimizedMethodsLock());

  // Executes everything with compiled code (or interpreter if there is no code). May visit class
  // linker classes through ConfigureStubs.
  void UndeoptimizeEverything(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_,
               !Locks::classlinker_classes_lock_,
               !GetDeoptimizedMethodsLock());

  // Deoptimize a method by forcing its execution with the interpreter. Note that a static
  // method (except a class initializer) set to the resolution trampoline will be deoptimized
  // only once its declaring class is initialized.
  void Deoptimize(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !GetDeoptimizedMethodsLock());

  // Undeoptimize a method by restoring its entrypoints. Note that a static method
  // (except a class initializer) set to the resolution trampoline will be updated only once its
  // declaring class is initialized.
  void Undeoptimize(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !GetDeoptimizedMethodsLock());
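
  // Illustrative single-method deoptimization, the pattern used when a breakpoint is set
  // (a sketch only):
  //
  //   instr->Deoptimize(method);    // 'method' now runs in the interpreter.
  //   ...
  //   instr->Undeoptimize(method);  // Entrypoints restored once no breakpoint remains.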

  // Indicates whether the method has been deoptimized so it is executed with the interpreter.
  bool IsDeoptimized(ArtMethod* method)
      REQUIRES(!GetDeoptimizedMethodsLock()) REQUIRES_SHARED(Locks::mutator_lock_);

  // Enable method tracing by installing instrumentation entry/exit stubs or interpreter.
  void EnableMethodTracing(const char* key,
                           bool needs_interpreter = kDeoptimizeForAccurateMethodEntryExitListeners)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_,
               !Locks::classlinker_classes_lock_,
               !GetDeoptimizedMethodsLock());

  // Disable method tracing by uninstalling instrumentation entry/exit stubs or interpreter.
  void DisableMethodTracing(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_,
               !Locks::classlinker_classes_lock_,
               !GetDeoptimizedMethodsLock());
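
  // Illustrative tracing session (a sketch; assumes the caller satisfies the lock requirements
  // above, as the method tracer does, and that 'trace_listener' was already registered for the
  // corresponding events with AddListener):
  //
  //   instr->EnableMethodTracing("Tracer");
  //   ...  // Method entry/exit events are delivered to the registered listener.
  //   instr->DisableMethodTracing("Tracer");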

  InterpreterHandlerTable GetInterpreterHandlerTable() const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return interpreter_handler_table_;
  }

  void InstrumentQuickAllocEntryPoints() REQUIRES(!Locks::instrument_entrypoints_lock_);
  void UninstrumentQuickAllocEntryPoints() REQUIRES(!Locks::instrument_entrypoints_lock_);
  void InstrumentQuickAllocEntryPointsLocked()
      REQUIRES(Locks::instrument_entrypoints_lock_, !Locks::thread_list_lock_,
               !Locks::runtime_shutdown_lock_);
  void UninstrumentQuickAllocEntryPointsLocked()
      REQUIRES(Locks::instrument_entrypoints_lock_, !Locks::thread_list_lock_,
               !Locks::runtime_shutdown_lock_);
  void ResetQuickAllocEntryPoints() REQUIRES(Locks::runtime_shutdown_lock_);

  // Update the code of a method respecting any installed stubs.
  void UpdateMethodsCode(ArtMethod* method, const void* quick_code)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());

  // Update the code of a native method to a JITed stub.
  void UpdateNativeMethodsCodeToJitCode(ArtMethod* method, const void* quick_code)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());

  // Update the code of a method to the interpreter respecting any installed stubs from debugger.
  void UpdateMethodsCodeToInterpreterEntryPoint(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());

  // Update the code of a method respecting any installed stubs from debugger.
  void UpdateMethodsCodeForJavaDebuggable(ArtMethod* method, const void* quick_code)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());

  // Return the code that we can execute for an invoke including from the JIT.
  const void* GetCodeForInvoke(ArtMethod* method) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Get the quick code for the given method. More efficient than asking the class linker as it
  // will short-cut to GetCode if instrumentation and static method resolution stubs aren't
  // installed.
  const void* GetQuickCodeFor(ArtMethod* method, PointerSize pointer_size) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ForceInterpretOnly() {
    interpret_only_ = true;
    forced_interpret_only_ = true;
  }

  // Called by ArtMethod::Invoke to determine dispatch mechanism.
  bool InterpretOnly() const {
    return interpret_only_;
  }

  bool IsForcedInterpretOnly() const {
    return forced_interpret_only_;
  }

  // Returns true if the method's code is in a boot image oat file that was not compiled as
  // debuggable; in that case a debug version (interpreter or JITed) is needed.
  bool NeedDebugVersionFor(ArtMethod* method) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool AreExitStubsInstalled() const {
    return instrumentation_stubs_installed_;
  }

  bool HasMethodEntryListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_entry_listeners_;
  }

  bool HasMethodExitListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_exit_listeners_;
  }

  bool HasMethodUnwindListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_unwind_listeners_;
  }

  bool HasDexPcListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_dex_pc_listeners_;
  }

  bool HasFieldReadListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_field_read_listeners_;
  }

  bool HasFieldWriteListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_field_write_listeners_;
  }

  bool HasExceptionThrownListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_exception_thrown_listeners_;
  }

  bool HasBranchListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_branch_listeners_;
  }

  bool HasWatchedFramePopListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_watched_frame_pop_listeners_;
  }

  bool HasExceptionHandledListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_exception_handled_listeners_;
  }

  bool IsActive() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_dex_pc_listeners_ || have_method_entry_listeners_ || have_method_exit_listeners_ ||
        have_field_read_listeners_ || have_field_write_listeners_ ||
        have_exception_thrown_listeners_ || have_method_unwind_listeners_ ||
        have_branch_listeners_ || have_watched_frame_pop_listeners_ ||
        have_exception_handled_listeners_;
  }

  // Inform listeners that a method has been entered. A dex PC is provided as we may install
  // listeners into executing code and get method enter events for methods already on the stack.
  void MethodEnterEvent(Thread* thread, mirror::Object* this_object,
                        ArtMethod* method, uint32_t dex_pc) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasMethodEntryListeners())) {
      MethodEnterEventImpl(thread, this_object, method, dex_pc);
    }
  }

  // Inform listeners that a method has been exited.
  void MethodExitEvent(Thread* thread,
                       mirror::Object* this_object,
                       ArtMethod* method,
                       uint32_t dex_pc,
                       const JValue& return_value) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasMethodExitListeners())) {
      MethodExitEventImpl(thread, this_object, method, dex_pc, return_value);
    }
  }

  // Inform listeners that a method has been exited due to an exception.
  void MethodUnwindEvent(Thread* thread, mirror::Object* this_object,
                         ArtMethod* method, uint32_t dex_pc) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Inform listeners that the dex pc has moved (only supported by the interpreter).
  void DexPcMovedEvent(Thread* thread, mirror::Object* this_object,
                       ArtMethod* method, uint32_t dex_pc) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasDexPcListeners())) {
      DexPcMovedEventImpl(thread, this_object, method, dex_pc);
    }
  }

  // Inform listeners that a branch has been taken (only supported by the interpreter).
  void Branch(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t offset) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasBranchListeners())) {
      BranchImpl(thread, method, dex_pc, offset);
    }
  }

  // Inform listeners that we read a field (only supported by the interpreter).
  void FieldReadEvent(Thread* thread, mirror::Object* this_object,
                      ArtMethod* method, uint32_t dex_pc,
                      ArtField* field) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasFieldReadListeners())) {
      FieldReadEventImpl(thread, this_object, method, dex_pc, field);
    }
  }

  // Inform listeners that we write a field (only supported by the interpreter).
  void FieldWriteEvent(Thread* thread, mirror::Object* this_object,
                       ArtMethod* method, uint32_t dex_pc,
                       ArtField* field, const JValue& field_value) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasFieldWriteListeners())) {
      FieldWriteEventImpl(thread, this_object, method, dex_pc, field, field_value);
    }
  }

  // Inform listeners that a watched frame is being popped (only supported by the interpreter).
  void WatchedFramePopped(Thread* thread, const ShadowFrame& frame) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasWatchedFramePopListeners())) {
      WatchedFramePopImpl(thread, frame);
    }
  }

  // Inform listeners that an exception was thrown.
  void ExceptionThrownEvent(Thread* thread, mirror::Throwable* exception_object) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Inform listeners that an exception has been handled. This is not sent for native code or for
  // exceptions which reach the end of the thread's stack.
  void ExceptionHandledEvent(Thread* thread, mirror::Throwable* exception_object) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Called when an instrumented method is entered. The intended link register (lr) is saved so
  // that returning causes a branch to the method exit stub. Generates method enter events.
  void PushInstrumentationStackFrame(Thread* self, mirror::Object* this_object,
                                     ArtMethod* method, uintptr_t lr,
                                     bool interpreter_entry)
      REQUIRES_SHARED(Locks::mutator_lock_);

  DeoptimizationMethodType GetDeoptimizationMethodType(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Called when an instrumented method is exited. Removes the pushed instrumentation frame
  // returning the intended link register. Generates method exit events. The gpr_result and
  // fpr_result pointers are pointers to the locations where the integer/pointer and floating point
  // result values of the function are stored. Both pointers must always be valid but the values
  // held there will only be meaningful if interpreted as the appropriate type given the function
  // being returned from.
  TwoWordReturn PopInstrumentationStackFrame(Thread* self, uintptr_t* return_pc,
                                             uint64_t* gpr_result, uint64_t* fpr_result)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
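
  // Taken together, PushInstrumentationStackFrame and PopInstrumentationStackFrame implement the
  // exit-stub protocol, roughly (illustrative outline):
  //   1. The entry stub calls PushInstrumentationStackFrame, which records the caller's real
  //      return pc and redirects the link register at the method exit stub.
  //   2. The method body runs; returning branches into the exit stub.
  //   3. The exit stub calls PopInstrumentationStackFrame, which generates method exit events and
  //      yields the pc to branch to (the original return address, or a deoptimization entry if
  //      the frames below must now be interpreted).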

  // Pops nframes instrumentation frames from the current thread. Returns the return pc for the
  // last instrumentation frame that's popped.
  uintptr_t PopFramesForDeoptimization(Thread* self, size_t nframes) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Call-back for ConfigureStubs.
  void InstallStubsForClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!GetDeoptimizedMethodsLock());

  void InstallStubsForMethod(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());

  // Sets up instrumentation to allow single thread deoptimization using ForceInterpreterCount.
  void EnableSingleThreadDeopt()
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_,
               !Locks::classlinker_classes_lock_,
               !GetDeoptimizedMethodsLock());

  // Install instrumentation exit stub on every method of the stack of the given thread.
  // This is used by the debugger to cause a deoptimization of the thread's stack after updating
  // local variable(s).
  void InstrumentThreadStack(Thread* thread)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static size_t ComputeFrameId(Thread* self,
                               size_t frame_depth,
                               size_t inlined_frames_before_frame)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Does not hold lock, used to check if someone changed from not instrumented to instrumented
  // during a GC suspend point.
  bool AllocEntrypointsInstrumented() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return alloc_entrypoints_instrumented_;
  }

  InstrumentationLevel GetCurrentInstrumentationLevel() const;

 private:
  // Returns true if moving to the given instrumentation level requires the installation of stubs.
  // False otherwise.
  bool RequiresInstrumentationInstallation(InstrumentationLevel new_level) const;

  // Does the job of installing or removing instrumentation code within methods.
  // In order to support multiple clients using instrumentation at the same time,
  // the caller must pass a unique key (a string) identifying it so we remember which
  // instrumentation level it needs. The current instrumentation level therefore
  // becomes the highest instrumentation level required by any client (see the example below).
  void ConfigureStubs(const char* key, InstrumentationLevel desired_instrumentation_level)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!GetDeoptimizedMethodsLock(),
               !Locks::thread_list_lock_,
               !Locks::classlinker_classes_lock_);
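
  // For example (illustrative): if client "Tracer" has requested
  // kInstrumentWithInstrumentationStubs and client "Debugger" then requests
  // kInstrumentWithInterpreter, the runtime runs at kInstrumentWithInterpreter; a later
  // ConfigureStubs("Debugger", InstrumentationLevel::kInstrumentNothing) drops it back to the
  // stubs still required by "Tracer".
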
  void UpdateStubs() REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!GetDeoptimizedMethodsLock(),
               !Locks::thread_list_lock_,
               !Locks::classlinker_classes_lock_);
  void UpdateInstrumentationLevels(InstrumentationLevel level)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!GetDeoptimizedMethodsLock(),
               !Locks::thread_list_lock_,
               !Locks::classlinker_classes_lock_);

  void UpdateInterpreterHandlerTable() REQUIRES(Locks::mutator_lock_) {
    /*
     * TUNING: Dalvik's mterp stashes the actual current handler table base in a
     * tls field.  For Arm, this enables all suspend, debug & tracing checks to be
     * collapsed into a single conditionally-executed ldw instruction.
     * Move to Dalvik-style handler-table management for both the goto interpreter and
     * mterp.
     */
    interpreter_handler_table_ = IsActive() ? kAlternativeHandlerTable : kMainHandlerTable;
  }

  // No thread safety analysis to get around SetQuickAllocEntryPointsInstrumented requiring
  // exclusive access to the mutator lock, which you can't get if the runtime isn't started.
  void SetEntrypointsInstrumented(bool instrumented) NO_THREAD_SAFETY_ANALYSIS;

  void MethodEnterEventImpl(Thread* thread,
                            ObjPtr<mirror::Object> this_object,
                            ArtMethod* method,
                            uint32_t dex_pc) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void MethodExitEventImpl(Thread* thread,
                           ObjPtr<mirror::Object> this_object,
                           ArtMethod* method,
                           uint32_t dex_pc,
                           const JValue& return_value) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void DexPcMovedEventImpl(Thread* thread,
                           ObjPtr<mirror::Object> this_object,
                           ArtMethod* method,
                           uint32_t dex_pc) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void BranchImpl(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t offset) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void WatchedFramePopImpl(Thread* thread, const ShadowFrame& frame) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FieldReadEventImpl(Thread* thread,
                          ObjPtr<mirror::Object> this_object,
                          ArtMethod* method,
                          uint32_t dex_pc,
                          ArtField* field) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FieldWriteEventImpl(Thread* thread,
                           ObjPtr<mirror::Object> this_object,
                           ArtMethod* method,
                           uint32_t dex_pc,
                           ArtField* field,
                           const JValue& field_value) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Read barrier-aware utility functions for accessing deoptimized_methods_.
  bool AddDeoptimizedMethod(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(GetDeoptimizedMethodsLock());
  bool IsDeoptimizedMethod(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_, GetDeoptimizedMethodsLock());
  bool RemoveDeoptimizedMethod(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(GetDeoptimizedMethodsLock());
  ArtMethod* BeginDeoptimizedMethod()
      REQUIRES_SHARED(Locks::mutator_lock_, GetDeoptimizedMethodsLock());
  bool IsDeoptimizedMethodsEmpty() const
      REQUIRES_SHARED(Locks::mutator_lock_, GetDeoptimizedMethodsLock());
  void UpdateMethodsCodeImpl(ArtMethod* method, const void* quick_code)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());

  ReaderWriterMutex* GetDeoptimizedMethodsLock() const {
    return deoptimized_methods_lock_.get();
  }

  // Have we hijacked ArtMethod::code_ so that it calls instrumentation/interpreter code?
  bool instrumentation_stubs_installed_;

  // Have we hijacked ArtMethod::code_ to reference the enter/exit stubs?
  bool entry_exit_stubs_installed_;

  // Have we hijacked ArtMethod::code_ to reference the enter interpreter stub?
  bool interpreter_stubs_installed_;

  // Do we need the fidelity of events that we only get from running within the interpreter?
  bool interpret_only_;

  // Did the runtime request we only run in the interpreter? i.e. -Xint mode.
  bool forced_interpret_only_;

  // Do we have any listeners for method entry events? Short-cut to avoid taking the
  // instrumentation_lock_.
  bool have_method_entry_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for method exit events? Short-cut to avoid taking the
  // instrumentation_lock_.
  bool have_method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for method unwind events? Short-cut to avoid taking the
  // instrumentation_lock_.
  bool have_method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for dex pc move events? Short-cut to avoid taking the
  // instrumentation_lock_.
  bool have_dex_pc_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for field read events? Short-cut to avoid taking the
  // instrumentation_lock_.
  bool have_field_read_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for field write events? Short-cut to avoid taking the
  // instrumentation_lock_.
  bool have_field_write_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any exception thrown listeners? Short-cut to avoid taking the instrumentation_lock_.
  bool have_exception_thrown_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any frame pop listeners? Short-cut to avoid taking the instrumentation_lock_.
  bool have_watched_frame_pop_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any branch listeners? Short-cut to avoid taking the instrumentation_lock_.
  bool have_branch_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any exception handled listeners? Short-cut to avoid taking the
  // instrumentation_lock_.
  bool have_exception_handled_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Contains the instrumentation level required by each client of the instrumentation identified
  // by a string key.
  using InstrumentationLevelTable = SafeMap<const char*, InstrumentationLevel>;
  InstrumentationLevelTable requested_instrumentation_levels_ GUARDED_BY(Locks::mutator_lock_);

  // The event listeners, written to with the mutator_lock_ exclusively held.
  // Mutators must be able to iterate over these lists concurrently, that is, with listeners being
  // added or removed while iterating. The modifying thread holds exclusive lock,
  // so other threads cannot iterate (i.e. read the data of the list) at the same time but they
  // do keep iterators that need to remain valid. This is the reason these listeners are std::list
  // and not for example std::vector: the existing storage for a std::list does not move.
  // Note that mutators cannot make a copy of these lists before iterating, as the instrumentation
  // listeners can also be deleted concurrently.
  // As a result, these lists are never trimmed. That's acceptable given the low number of
  // listeners we have.
  std::list<InstrumentationListener*> method_entry_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> dex_pc_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> field_read_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> field_write_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> exception_thrown_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> watched_frame_pop_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> exception_handled_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // The set of methods being deoptimized (by the debugger) which must be executed with interpreter
  // only.
  mutable std::unique_ptr<ReaderWriterMutex> deoptimized_methods_lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
  std::unordered_set<ArtMethod*> deoptimized_methods_ GUARDED_BY(GetDeoptimizedMethodsLock());
  bool deoptimization_enabled_;

  // Current interpreter handler table. This is updated each time the thread state flags are
  // modified.
  InterpreterHandlerTable interpreter_handler_table_ GUARDED_BY(Locks::mutator_lock_);

  // Greater than 0 if quick alloc entry points instrumented.
  size_t quick_alloc_entry_points_instrumentation_counter_;

  // alloc_entrypoints_instrumented_ is only updated with all the threads suspended; this is done
  // to prevent races with the GC, which relies on thread suspension to only see
  // alloc_entrypoints_instrumented_ change during suspend points.
  bool alloc_entrypoints_instrumented_;

  // Whether we can use instrumentation trampolines. After the first time we instrument something
  // with the interpreter we can no longer use trampolines because it can lead to stack corruption.
  // TODO: Figure out a way to remove the need for this.
  bool can_use_instrumentation_trampolines_;

  friend class InstrumentationTest;  // For GetCurrentInstrumentationLevel and ConfigureStubs.
  friend class InstrumentationStackPopper;  // For popping instrumentation frames.

  DISALLOW_COPY_AND_ASSIGN(Instrumentation);
};
std::ostream& operator<<(std::ostream& os, const Instrumentation::InstrumentationEvent& rhs);
std::ostream& operator<<(std::ostream& os, const Instrumentation::InstrumentationLevel& rhs);

// An element in the instrumentation side stack maintained in art::Thread.
struct InstrumentationStackFrame {
  InstrumentationStackFrame(mirror::Object* this_object,
                            ArtMethod* method,
                            uintptr_t return_pc,
                            size_t frame_id,
                            bool interpreter_entry)
      : this_object_(this_object),
        method_(method),
        return_pc_(return_pc),
        frame_id_(frame_id),
        interpreter_entry_(interpreter_entry) {
  }

  std::string Dump() const REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Object* this_object_;
  ArtMethod* method_;
  uintptr_t return_pc_;
  size_t frame_id_;
  bool interpreter_entry_;
};

}  // namespace instrumentation
}  // namespace art

#endif  // ART_RUNTIME_INSTRUMENTATION_H_