/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INSTRUMENTATION_H_
#define ART_RUNTIME_INSTRUMENTATION_H_

#include <stdint.h>

#include <functional>
#include <list>
#include <memory>
#include <optional>
#include <queue>
#include <unordered_set>

#include "arch/instruction_set.h"
#include "base/locks.h"
#include "base/macros.h"
#include "base/pointer_size.h"
#include "base/safe_map.h"
#include "gc_root.h"
#include "jvalue.h"
#include "offsets.h"

namespace art HIDDEN {
namespace mirror {
class Class;
class Object;
class Throwable;
}  // namespace mirror
class ArtField;
class ArtMethod;
template <typename T> class Handle;
template <typename T> class MutableHandle;
struct NthCallerVisitor;
union JValue;
class OatQuickMethodHeader;
class SHARED_LOCKABLE ReaderWriterMutex;
class ShadowFrame;
class Thread;
enum class DeoptimizationMethodType;

namespace instrumentation {

// Do we want to deoptimize for method entry and exit listeners or just try to intercept
// invocations? Deoptimization forces all code to run in the interpreter and considerably hurts the
// application's performance.
static constexpr bool kDeoptimizeForAccurateMethodEntryExitListeners = true;

// An optional frame is either Some(const ShadowFrame& current_frame) or None depending on whether
// the method being exited has a shadow frame associated with the current stack frame.
using OptionalFrame = std::optional<std::reference_wrapper<const ShadowFrame>>;
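
// For example, a hypothetical MethodExited implementation would check the optional before
// touching the frame:
//
//   void MethodExited(Thread* thread, ArtMethod* method, OptionalFrame frame,
//                     JValue& return_value) override {
//     if (frame.has_value()) {
//       const ShadowFrame& shadow_frame = frame->get();
//       // ... inspect the interpreter frame ...
//     }
//   }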

// Instrumentation event listener API. Registered listeners will get the appropriate call-back for
// the events they are listening for. The call-backs supply the thread, method and dex_pc the event
// occurred at. The thread may or may not be Thread::Current().
struct InstrumentationListener {
  InstrumentationListener() {}
  virtual ~InstrumentationListener() {}

  // Call-back for when a method is entered.
  virtual void MethodEntered(Thread* thread, ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when a method is exited, with the return value wrapped in a handle so the
  // implementation may go through a suspend point.
  virtual void MethodExited(Thread* thread,
                            ArtMethod* method,
                            OptionalFrame frame,
                            MutableHandle<mirror::Object>& return_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Call-back for when a method is exited. The implementor should either handler-ize the return
  // value (if appropriate) or use the alternate MethodExited callback instead if they need to
  // go through a suspend point.
  virtual void MethodExited(Thread* thread,
                            ArtMethod* method,
                            OptionalFrame frame,
                            JValue& return_value)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when a method is popped due to an exception throw. A method will either cause a
  // MethodExited call-back or a MethodUnwind call-back when its activation is removed.
  virtual void MethodUnwind(Thread* thread,
                            ArtMethod* method,
                            uint32_t dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when the dex pc moves in a method.
  virtual void DexPcMoved(Thread* thread,
                          Handle<mirror::Object> this_object,
                          ArtMethod* method,
                          uint32_t new_dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when we read from a field.
  virtual void FieldRead(Thread* thread,
                         Handle<mirror::Object> this_object,
                         ArtMethod* method,
                         uint32_t dex_pc,
                         ArtField* field)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when we write into a field, with the new value wrapped in a handle.
  virtual void FieldWritten(Thread* thread,
                            Handle<mirror::Object> this_object,
                            ArtMethod* method,
                            uint32_t dex_pc,
                            ArtField* field,
                            Handle<mirror::Object> field_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Call-back for when we write into a field.
  virtual void FieldWritten(Thread* thread,
                            Handle<mirror::Object> this_object,
                            ArtMethod* method,
                            uint32_t dex_pc,
                            ArtField* field,
                            const JValue& field_value)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back when an exception is thrown.
  virtual void ExceptionThrown(Thread* thread,
                               Handle<mirror::Throwable> exception_object)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back when an exception is caught/handled by Java code.
  virtual void ExceptionHandled(Thread* thread, Handle<mirror::Throwable> exception_object)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when we execute a branch.
  virtual void Branch(Thread* thread,
                      ArtMethod* method,
                      uint32_t dex_pc,
                      int32_t dex_pc_offset)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back when a shadow_frame with the needs_notify_pop_ boolean set is popped off the stack
  // by either a return or an exception. Normally instrumentation listeners should ensure that
  // there are shadow-frames by deoptimizing stacks.
  virtual void WatchedFramePop([[maybe_unused]] Thread* thread,
                               [[maybe_unused]] const ShadowFrame& frame)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
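
// A minimal sketch of a concrete listener (hypothetical; only two of the pure-virtual
// call-backs are shown here, but a real subclass must override all of them):
//
//   class CountingListener final : public InstrumentationListener {
//    public:
//     void MethodEntered(Thread* thread, ArtMethod* method)
//         REQUIRES_SHARED(Locks::mutator_lock_) override {
//       ++entry_count_;
//     }
//     void MethodExited(Thread* thread, ArtMethod* method, OptionalFrame frame,
//                       JValue& return_value)
//         REQUIRES_SHARED(Locks::mutator_lock_) override {
//       ++exit_count_;
//     }
//     // ... remaining pure-virtual call-backs ...
//    private:
//     size_t entry_count_ = 0;
//     size_t exit_count_ = 0;
//   };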

class Instrumentation;
// A helper to send instrumentation events while popping the stack in a safe way.
class InstrumentationStackPopper {
 public:
  explicit InstrumentationStackPopper(Thread* self);
  ~InstrumentationStackPopper() REQUIRES_SHARED(Locks::mutator_lock_);

  // Pop frames up to `stack_pointer`. Returns true if the frames were popped without any
  // exceptions, false otherwise. `exception` is the exception that caused the pop.
  bool PopFramesTo(uintptr_t stack_pointer, /*in-out*/MutableHandle<mirror::Throwable>& exception)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  Thread* self_;
  Instrumentation* instrumentation_;
  // The stack pointer limit for frames to pop.
  uintptr_t pop_until_;
};

// Instrumentation is a catch-all for when extra information is required from the runtime. The
// typical use for instrumentation is for profiling and debugging. Instrumentation may add stubs
// to method entry and exit, it may also force execution to be switched to the interpreter and
// trigger deoptimization.
class Instrumentation {
 public:
  enum InstrumentationEvent {
    kMethodEntered = 0x1,
    kMethodExited = 0x2,
    kMethodUnwind = 0x4,
    kDexPcMoved = 0x8,
    kFieldRead = 0x10,
    kFieldWritten = 0x20,
    kExceptionThrown = 0x40,
    kBranch = 0x80,
    kWatchedFramePop = 0x200,
    kExceptionHandled = 0x400,
  };

  enum class InstrumentationLevel {
    kInstrumentNothing,             // execute without instrumentation
    kInstrumentWithEntryExitHooks,  // execute with entry/exit hooks
    kInstrumentWithInterpreter      // execute with interpreter
  };
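
  // The levels are ordered by increasing cost; kInstrumentWithInterpreter also implies that
  // entry/exit hooks are active (see EntryExitStubsInstalled() below).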

  static constexpr uint8_t kFastTraceListeners = 0b01;
  static constexpr uint8_t kSlowMethodEntryExitListeners = 0b10;
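  // have_method_entry_listeners_ and have_method_exit_listeners_ (below) hold 0 when no
  // listeners are installed, one of these constants when only one kind is installed, or the
  // two OR-ed together when both fast trace and slow listeners are installed.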

  Instrumentation();

  static constexpr MemberOffset RunExitHooksOffset() {
    // Assert that run_exit_hooks_ is 8 bits wide. If the size changes
    // update the compare instructions in the code generator when generating checks for
    // MethodEntryExitHooks.
    static_assert(sizeof(run_exit_hooks_) == 1, "run_exit_hooks_ isn't expected size");
    return MemberOffset(OFFSETOF_MEMBER(Instrumentation, run_exit_hooks_));
  }

  static constexpr MemberOffset HaveMethodEntryListenersOffset() {
    // Assert that have_method_entry_listeners_ is 8 bits wide. If the size changes
    // update the compare instructions in the code generator when generating checks for
    // MethodEntryExitHooks.
    static_assert(sizeof(have_method_entry_listeners_) == 1,
                  "have_method_entry_listeners_ isn't expected size");
    return MemberOffset(OFFSETOF_MEMBER(Instrumentation, have_method_entry_listeners_));
  }

  static constexpr MemberOffset HaveMethodExitListenersOffset() {
    // Assert that have_method_exit_listeners_ is 8 bits wide. If the size changes
    // update the compare instructions in the code generator when generating checks for
    // MethodEntryExitHooks.
    static_assert(sizeof(have_method_exit_listeners_) == 1,
                  "have_method_exit_listeners_ isn't expected size");
    return MemberOffset(OFFSETOF_MEMBER(Instrumentation, have_method_exit_listeners_));
  }

  // Add a listener to be notified of the masked-together set of instrumentation events. This
  // suspends the runtime to install stubs. You are expected to hold the mutator lock as a proxy
  // for saying you should have suspended all threads (installing stubs while threads are running
  // will break).
  EXPORT void AddListener(InstrumentationListener* listener,
                          uint32_t events,
                          bool is_trace_listener = false)
      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);

  // Removes listeners for the specified events.
  EXPORT void RemoveListener(InstrumentationListener* listener,
                             uint32_t events,
                             bool is_trace_listener = false)
      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);
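
  // A sketch of typical usage (hypothetical client; ScopedSuspendAll provides the required
  // exclusive mutator lock):
  //
  //   {
  //     ScopedSuspendAll ssa(__FUNCTION__);
  //     instr->AddListener(listener, kMethodEntered | kMethodExited);
  //   }
  //   ... events are delivered ...
  //   {
  //     ScopedSuspendAll ssa(__FUNCTION__);
  //     instr->RemoveListener(listener, kMethodEntered | kMethodExited);
  //   }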

  // Calls UndeoptimizeEverything which may visit class linker classes through ConfigureStubs.
  // try_switch_to_non_debuggable specifies if we can switch the runtime back to non-debuggable.
  // When a debugger is attached to a non-debuggable app, we switch the runtime to debuggable and
  // when we are detaching the debugger we move back to non-debuggable. If we are disabling
  // deoptimization for other reasons (ex: removing the last breakpoint) while the debugger is
  // still connected, we pass false to stay debuggable. Switching runtimes is expensive so we only
  // want to switch when we know debug features aren't needed anymore.
  EXPORT void DisableDeoptimization(const char* key, bool try_switch_to_non_debuggable)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_);

  // Enables entry exit hooks support. This is called in preparation for debug requests that
  // require calling method entry / exit hooks.
  EXPORT void EnableEntryExitHooks(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_);

  bool AreAllMethodsDeoptimized() const {
    return InterpreterStubsInstalled();
  }
  bool ShouldNotifyMethodEnterExitEvents() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Executes everything with interpreter.
  EXPORT void DeoptimizeEverything(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
          REQUIRES(!Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);

  // Executes everything with compiled code (or interpreter if there is no code). May visit class
  // linker classes through ConfigureStubs.
  EXPORT void UndeoptimizeEverything(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
          REQUIRES(!Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);

  // Deoptimize a method by forcing its execution with the interpreter. Note that a static
  // method (except a class initializer) set to the resolution trampoline will be deoptimized
  // only once its declaring class is initialized.
  EXPORT void Deoptimize(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_);

  // Undeoptimize the method by restoring its entrypoints. Note that a static method
  // (except a class initializer) set to the resolution trampoline will be updated only once its
  // declaring class is initialized.
  EXPORT void Undeoptimize(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_);
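
  // A sketch of how a hypothetical debugger client pairs these calls around a breakpoint's
  // lifetime:
  //
  //   instr->Deoptimize(method);    // first breakpoint added: force interpreter execution
  //   ... breakpoint in use ...
  //   instr->Undeoptimize(method);  // last breakpoint removed: restore the entrypoints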

  // Indicates whether the method has been deoptimized so it is executed with the interpreter.
  EXPORT bool IsDeoptimized(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  // Indicates if any method needs to be deoptimized. This is used to avoid walking the stack to
  // determine if a deoptimization is required.
  bool IsDeoptimizedMethodsEmpty() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Enable method tracing by installing instrumentation entry/exit stubs or interpreter.
  EXPORT void EnableMethodTracing(
      const char* key,
      InstrumentationListener* listener,
      bool needs_interpreter = kDeoptimizeForAccurateMethodEntryExitListeners)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
          REQUIRES(!Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);

  // Disable method tracing by uninstalling instrumentation entry/exit stubs or interpreter.
  EXPORT void DisableMethodTracing(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
          REQUIRES(!Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);

  void InstrumentQuickAllocEntryPoints() REQUIRES(!Locks::instrument_entrypoints_lock_);
  void UninstrumentQuickAllocEntryPoints() REQUIRES(!Locks::instrument_entrypoints_lock_);
  void InstrumentQuickAllocEntryPointsLocked()
      REQUIRES(Locks::instrument_entrypoints_lock_, !Locks::thread_list_lock_,
               !Locks::runtime_shutdown_lock_);
  void UninstrumentQuickAllocEntryPointsLocked()
      REQUIRES(Locks::instrument_entrypoints_lock_, !Locks::thread_list_lock_,
               !Locks::runtime_shutdown_lock_);
  void ResetQuickAllocEntryPoints() REQUIRES(Locks::runtime_shutdown_lock_);

  // Returns a string representation of the given entry point.
  static std::string EntryPointString(const void* code);

  // Initialize the entrypoint of the method. `aot_code` is the AOT code.
  EXPORT void InitializeMethodsCode(ArtMethod* method, const void* aot_code)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Update the code of a method respecting any installed stubs.
  void UpdateMethodsCode(ArtMethod* method, const void* new_code)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Update the code of a native method to a JITed stub.
  void UpdateNativeMethodsCodeToJitCode(ArtMethod* method, const void* new_code)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Return the code that we can execute for an invoke, including from the JIT.
  EXPORT const void* GetCodeForInvoke(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  // Return the code that we can execute considering the current instrumentation level.
  // If interpreter stubs are installed return interpreter bridge. If the entry exit stubs
  // are installed return an instrumentation entry point. Otherwise, return the code that
  // can be executed including from the JIT.
  const void* GetMaybeInstrumentedCodeForInvoke(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ForceInterpretOnly() {
    forced_interpret_only_ = true;
  }

  bool EntryExitStubsInstalled() const {
    return instrumentation_level_ == InstrumentationLevel::kInstrumentWithEntryExitHooks ||
           instrumentation_level_ == InstrumentationLevel::kInstrumentWithInterpreter;
  }

  bool InterpreterStubsInstalled() const {
    return instrumentation_level_ == InstrumentationLevel::kInstrumentWithInterpreter;
  }

  // Called by ArtMethod::Invoke to determine dispatch mechanism.
  bool InterpretOnly() const {
    return forced_interpret_only_ || InterpreterStubsInstalled();
  }
  bool InterpretOnly(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsForcedInterpretOnly() const {
    return forced_interpret_only_;
  }

  bool RunExitHooks() const {
    return run_exit_hooks_;
  }

  bool HasMethodEntryListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_entry_listeners_ != 0;
  }

  bool HasMethodExitListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_exit_listeners_ != 0;
  }

  bool HasFastMethodEntryListenersOnly() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_entry_listeners_ == kFastTraceListeners;
  }

  bool HasFastMethodExitListenersOnly() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_exit_listeners_ == kFastTraceListeners;
  }

  bool HasMethodUnwindListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_unwind_listeners_;
  }

  bool HasDexPcListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_dex_pc_listeners_;
  }

  bool HasFieldReadListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_field_read_listeners_;
  }

  bool HasFieldWriteListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_field_write_listeners_;
  }

  bool HasExceptionThrownListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_exception_thrown_listeners_;
  }

  bool HasBranchListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_branch_listeners_;
  }

  bool HasWatchedFramePopListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_watched_frame_pop_listeners_;
  }

  bool HasExceptionHandledListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_exception_handled_listeners_;
  }

  // Returns whether dex pc events need to be reported for the specified method.
  // These events are reported when DexPcListeners are installed and at least one of the
  // following conditions holds:
  // 1. The method is deoptimized. This is done when there is a breakpoint on the method.
  // 2. The thread is deoptimized. This is used when single stepping a single thread.
  // 3. Interpreter stubs are installed. In this case no additional information is maintained
  //    about which methods need dex pc move events. This is usually used for features which need
  //    them for several methods across threads or need expensive processing. So it is OK to not
  //    further optimize this case.
  // DexPcListeners are installed when there is a breakpoint on any method / single stepping
  // on any thread. They are removed when the last breakpoint / single-step is removed. See
  // AddListener and RemoveListener for more details.
  bool NeedsDexPcEvents(ArtMethod* method, Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_);
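
  // Roughly (a sketch, not the exact implementation):
  //   NeedsDexPcEvents(m, t) == HasDexPcListeners() &&
  //       (IsDeoptimized(m) || <t is single-stepping / force-interpreted> ||
  //        InterpreterStubsInstalled())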

  bool NeedsSlowInterpreterForListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_field_read_listeners_ ||
           have_field_write_listeners_ ||
           have_watched_frame_pop_listeners_ ||
           have_exception_handled_listeners_;
  }

  // Inform listeners that a method has been entered. Listeners may be installed into already
  // executing code, so method enter events can be delivered for methods that are already on the
  // stack.
  void MethodEnterEvent(Thread* thread, ArtMethod* method) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasMethodEntryListeners())) {
      MethodEnterEventImpl(thread, method);
    }
  }

  // Inform listeners that a method has been exited.
  template<typename T>
  void MethodExitEvent(Thread* thread,
                       ArtMethod* method,
                       OptionalFrame frame,
                       T& return_value) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasMethodExitListeners())) {
      MethodExitEventImpl(thread, method, frame, return_value);
    }
  }

  // Inform listeners that a method has been exited due to an exception.
  void MethodUnwindEvent(Thread* thread,
                         ArtMethod* method,
                         uint32_t dex_pc) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Inform listeners that the dex pc has moved (only supported by the interpreter).
  void DexPcMovedEvent(Thread* thread,
                       ObjPtr<mirror::Object> this_object,
                       ArtMethod* method,
                       uint32_t dex_pc) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasDexPcListeners())) {
      DexPcMovedEventImpl(thread, this_object, method, dex_pc);
    }
  }

  // Inform listeners that a branch has been taken (only supported by the interpreter).
  void Branch(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t offset) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasBranchListeners())) {
      BranchImpl(thread, method, dex_pc, offset);
    }
  }

  // Inform listeners that we read a field (only supported by the interpreter).
  void FieldReadEvent(Thread* thread,
                      ObjPtr<mirror::Object> this_object,
                      ArtMethod* method,
                      uint32_t dex_pc,
                      ArtField* field) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasFieldReadListeners())) {
      FieldReadEventImpl(thread, this_object, method, dex_pc, field);
    }
  }

  // Inform listeners that we write a field (only supported by the interpreter).
  void FieldWriteEvent(Thread* thread,
                       ObjPtr<mirror::Object> this_object,
                       ArtMethod* method,
                       uint32_t dex_pc,
                       ArtField* field,
                       const JValue& field_value) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasFieldWriteListeners())) {
      FieldWriteEventImpl(thread, this_object, method, dex_pc, field, field_value);
    }
  }

  // Inform listeners that a watched frame is being popped (only supported by the interpreter).
  void WatchedFramePopped(Thread* thread, const ShadowFrame& frame) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasWatchedFramePopListeners())) {
      WatchedFramePopImpl(thread, frame);
    }
  }

  // Inform listeners that an exception was thrown.
  void ExceptionThrownEvent(Thread* thread, ObjPtr<mirror::Throwable> exception_object) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Inform listeners that an exception has been handled. This is not sent for native code or for
  // exceptions which reach the end of the thread's stack.
  void ExceptionHandledEvent(Thread* thread, ObjPtr<mirror::Throwable> exception_object) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Reconstruct the method's return value from the raw GPR / FPR results based on its return
  // type, setting *is_ref if the return type is a reference type.
  JValue GetReturnValue(ArtMethod* method, bool* is_ref, uint64_t* gpr_result, uint64_t* fpr_result)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool PushDeoptContextIfNeeded(Thread* self,
                                DeoptimizationMethodType deopt_type,
                                bool is_ref,
                                const JValue& result) REQUIRES_SHARED(Locks::mutator_lock_);
  void DeoptimizeIfNeeded(Thread* self,
                          ArtMethod** sp,
                          DeoptimizationMethodType type,
                          JValue result,
                          bool is_ref) REQUIRES_SHARED(Locks::mutator_lock_);
  // Returns whether the caller of the runtime method requires a deoptimization. This checks both
  // whether the method requires a deopt and whether this particular frame needs a deopt because
  // of a class redefinition.
  bool ShouldDeoptimizeCaller(Thread* self, ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_);
  bool ShouldDeoptimizeCaller(Thread* self, ArtMethod** sp, size_t frame_size)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Returns whether the specified method requires a deoptimization. This doesn't account for
  // whether a stack frame involving this method requires a deoptimization.
  bool NeedsSlowInterpreterForMethod(Thread* self, ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  DeoptimizationMethodType GetDeoptimizationMethodType(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Call-back for ConfigureStubs to install stubs on a class's methods.
  void InstallStubsForClass(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);

  void InstallStubsForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  EXPORT void UpdateEntrypointsForDebuggable() REQUIRES(art::Locks::mutator_lock_);

  // Install instrumentation exit stub on every method of the stack of the given thread.
  // This is used by:
  //  - the debugger to cause a deoptimization of all the frames in the thread's stack (for
  //    example, after updating local variables)
  //  - tracing, to call method entry / exit hooks. For this we instrument the stack frames to
  //    run entry / exit hooks but we don't need to deoptimize.
  // force_deopt indicates whether the frames need to deoptimize or not.
  EXPORT void InstrumentThreadStack(Thread* thread, bool force_deopt)
      REQUIRES(Locks::mutator_lock_);
  void InstrumentAllThreadStacks(bool force_deopt) REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::thread_list_lock_);

  // Force all currently running frames to be deoptimized back to interpreter. This should only be
  // used in cases where basically all compiled code has been invalidated.
  EXPORT void DeoptimizeAllThreadFrames() REQUIRES(art::Locks::mutator_lock_);

  static size_t ComputeFrameId(Thread* self,
                               size_t frame_depth,
                               size_t inlined_frames_before_frame)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Does not hold lock, used to check if someone changed from not instrumented to instrumented
  // during a GC suspend point.
  bool AllocEntrypointsInstrumented() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return alloc_entrypoints_instrumented_;
  }

  bool ProcessMethodUnwindCallbacks(Thread* self,
                                    std::queue<ArtMethod*>& methods,
                                    MutableHandle<mirror::Throwable>& exception)
      REQUIRES_SHARED(Locks::mutator_lock_);

  EXPORT InstrumentationLevel GetCurrentInstrumentationLevel() const;

  bool MethodSupportsExitEvents(ArtMethod* method, const OatQuickMethodHeader* header)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  // Update the current instrumentation_level_.
  void UpdateInstrumentationLevel(InstrumentationLevel level);

  // Does the job of installing or removing instrumentation code within methods.
  // In order to support multiple clients using instrumentation at the same time,
  // the caller must pass a unique key (a string) identifying it so we remember which
  // instrumentation level it needs. The current instrumentation level therefore
  // becomes the highest instrumentation level required by any client.
  void ConfigureStubs(const char* key,
                      InstrumentationLevel desired_instrumentation_level,
                      bool try_switch_to_non_debuggable)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);
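
  // For example (hypothetical keys): if client "tracer" requested
  // kInstrumentWithEntryExitHooks and client "debugger" requested kInstrumentWithInterpreter,
  // the effective level is kInstrumentWithInterpreter until the "debugger" entry is removed,
  // after which it drops back to kInstrumentWithEntryExitHooks.
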
  void UpdateStubs(bool try_switch_to_non_debuggable)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);

  // If there are no pending deoptimizations, restores the stack to the normal state by updating
  // the return pcs to actual return addresses from the instrumentation stack and clears the
  // instrumentation stack.
  void MaybeRestoreInstrumentationStack() REQUIRES(Locks::mutator_lock_);

  // Switches the runtime state to non-java-debuggable if entry / exit hooks are no longer
  // required and the runtime did not start off as java-debuggable.
  void MaybeSwitchRuntimeDebugState(Thread* self)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_);

  // No thread safety analysis to get around SetQuickAllocEntryPointsInstrumented requiring
  // exclusive access to the mutator lock, which you can't get if the runtime isn't started.
  void SetEntrypointsInstrumented(bool instrumented) NO_THREAD_SAFETY_ANALYSIS;

  void MethodEnterEventImpl(Thread* thread, ArtMethod* method) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  template <typename T>
  void MethodExitEventImpl(Thread* thread,
                           ArtMethod* method,
                           OptionalFrame frame,
                           T& return_value) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void DexPcMovedEventImpl(Thread* thread,
                           ObjPtr<mirror::Object> this_object,
                           ArtMethod* method,
                           uint32_t dex_pc) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void BranchImpl(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t offset) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void WatchedFramePopImpl(Thread* thread, const ShadowFrame& frame) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FieldReadEventImpl(Thread* thread,
                          ObjPtr<mirror::Object> this_object,
                          ArtMethod* method,
                          uint32_t dex_pc,
                          ArtField* field) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FieldWriteEventImpl(Thread* thread,
                           ObjPtr<mirror::Object> this_object,
                           ArtMethod* method,
                           uint32_t dex_pc,
                           ArtField* field,
                           const JValue& field_value) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Read barrier-aware utility functions for accessing deoptimized_methods_.
  bool AddDeoptimizedMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_);
  bool IsDeoptimizedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  bool RemoveDeoptimizedMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_);
  void UpdateMethodsCodeImpl(ArtMethod* method, const void* new_code)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // We need to run method exit hooks for two reasons:
  // 1. When method exit listeners are installed.
  // 2. When we need to check if the caller of this method needs a deoptimization. This is needed
  //    only for deoptimizing the currently active invocations on stack when we deoptimize a
  //    method or invalidate the JITed code when redefining classes. Future invocations don't
  //    need to do this check.
  //
  // For JITed code of non-native methods we already have a stack slot reserved for deoptimizing
  // on demand and we use that stack slot to check if the caller needs a deoptimization. JITed
  // code checks if there are any method exit listeners or if the stack slot is set to determine
  // if method exit hooks need to be executed.
  //
  // For JITed JNI stubs there is no reserved stack slot for this and we just use this variable to
  // check if we need to run method entry / exit hooks. This variable is set when either of the
  // above conditions is true. If we need method exit hooks only for case 2, we would also call
  // exit hooks for future invocations, which isn't necessary.
  // QuickToInterpreterBridge and GenericJniStub also use this for the same reasons.
  // If calling entry / exit hooks becomes expensive we could do the same optimization we did for
  // JITed code by having a reserved stack slot.
  bool run_exit_hooks_;

  // The required level of instrumentation. This could be one of the following values:
  // kInstrumentNothing: no instrumentation support is needed.
  // kInstrumentWithEntryExitHooks: needs support to call method entry/exit stubs.
  // kInstrumentWithInterpreter: only execute with the interpreter.
  Instrumentation::InstrumentationLevel instrumentation_level_;

  // Did the runtime request we only run in the interpreter? i.e. -Xint mode.
  bool forced_interpret_only_;

  // For method entry / exit events, we maintain fast trace listeners in a separate list to make
  // the implementation of fast trace listeners more efficient by JITing the code to handle fast
  // trace events. We use a uint8_t (and not bool) to encode if there are none / fast / slow
  // listeners.
  // Do we have any listeners for method entry events?
  uint8_t have_method_entry_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for method exit events?
  uint8_t have_method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for method unwind events?
  bool have_method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for dex move events?
  bool have_dex_pc_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for field read events?
  bool have_field_read_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for field write events?
  bool have_field_write_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any exception thrown listeners?
  bool have_exception_thrown_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any frame pop listeners?
  bool have_watched_frame_pop_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any branch listeners?
  bool have_branch_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any exception handled listeners?
  bool have_exception_handled_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Contains the instrumentation level required by each client of the instrumentation identified
  // by a string key.
  using InstrumentationLevelTable = SafeMap<const char*, InstrumentationLevel>;
  InstrumentationLevelTable requested_instrumentation_levels_ GUARDED_BY(Locks::mutator_lock_);

  // The event listeners, written to with the mutator_lock_ exclusively held.
  // Mutators must be able to iterate over these lists concurrently, that is, with listeners being
  // added or removed while iterating. The modifying thread holds exclusive lock,
  // so other threads cannot iterate (i.e. read the data of the list) at the same time but they
  // do keep iterators that need to remain valid. This is the reason these listeners are std::list
  // and not for example std::vector: the existing storage for a std::list does not move.
  // Note that mutators cannot make a copy of these lists before iterating, as the instrumentation
  // listeners can also be deleted concurrently.
  // As a result, these lists are never trimmed. That's acceptable given the low number of
  // listeners we have.
  std::list<InstrumentationListener*> method_entry_slow_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> method_entry_fast_trace_listeners_
      GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> method_exit_slow_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> method_exit_fast_trace_listeners_
      GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> dex_pc_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> field_read_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> field_write_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> exception_thrown_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> watched_frame_pop_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> exception_handled_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // The set of methods being deoptimized (by the debugger) which must be executed with interpreter
  // only.
  std::unordered_set<ArtMethod*> deoptimized_methods_ GUARDED_BY(Locks::mutator_lock_);

  // Greater than 0 if quick alloc entry points instrumented.
  size_t quick_alloc_entry_points_instrumentation_counter_;

  // alloc_entrypoints_instrumented_ is only updated with all the threads suspended; this is done
  // to prevent races with the GC, where the GC relies on thread suspension to only see
  // alloc_entrypoints_instrumented_ change during suspend points.
  bool alloc_entrypoints_instrumented_;

  friend class InstrumentationTest;  // For GetCurrentInstrumentationLevel and ConfigureStubs.
  friend class InstrumentationStackPopper;  // For popping instrumentation frames.
  friend void InstrumentationInstallStack(Thread*, bool);

  DISALLOW_COPY_AND_ASSIGN(Instrumentation);
};
std::ostream& operator<<(std::ostream& os, Instrumentation::InstrumentationEvent rhs);
std::ostream& operator<<(std::ostream& os, Instrumentation::InstrumentationLevel rhs);

}  // namespace instrumentation
}  // namespace art

#endif  // ART_RUNTIME_INSTRUMENTATION_H_