1 /* Copyright (C) 2016 The Android Open Source Project
2  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3  *
4  * This file implements interfaces from the file jvmti.h. This implementation
5  * is licensed under the same terms as the file jvmti.h.  The
6  * copyright and license information for the file jvmti.h follows.
7  *
8  * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
9  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
10  *
11  * This code is free software; you can redistribute it and/or modify it
12  * under the terms of the GNU General Public License version 2 only, as
13  * published by the Free Software Foundation.  Oracle designates this
14  * particular file as subject to the "Classpath" exception as provided
15  * by Oracle in the LICENSE file that accompanied this code.
16  *
17  * This code is distributed in the hope that it will be useful, but WITHOUT
18  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
20  * version 2 for more details (a copy is included in the LICENSE file that
21  * accompanied this code).
22  *
23  * You should have received a copy of the GNU General Public License version
24  * 2 along with this work; if not, write to the Free Software Foundation,
25  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
26  *
27  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
28  * or visit www.oracle.com if you need additional information or have any
29  * questions.
30  */
31 
32 #include "ti_stack.h"
33 
34 #include <algorithm>
35 #include <initializer_list>
36 #include <list>
37 #include <unordered_map>
38 #include <vector>
39 
40 #include "android-base/macros.h"
41 #include "android-base/thread_annotations.h"
42 #include "arch/context.h"
43 #include "art_field-inl.h"
44 #include "art_jvmti.h"
45 #include "art_method-inl.h"
46 #include "barrier.h"
47 #include "base/bit_utils.h"
48 #include "base/locks.h"
49 #include "base/macros.h"
50 #include "base/mutex.h"
51 #include "base/pointer_size.h"
52 #include "deopt_manager.h"
53 #include "dex/code_item_accessors-inl.h"
54 #include "dex/dex_file.h"
55 #include "dex/dex_file_annotations.h"
56 #include "dex/dex_file_types.h"
57 #include "dex/dex_instruction-inl.h"
58 #include "dex/primitive.h"
59 #include "events.h"
60 #include "gc_root.h"
61 #include "handle_scope-inl.h"
62 #include "instrumentation.h"
63 #include "interpreter/shadow_frame-inl.h"
64 #include "interpreter/shadow_frame.h"
65 #include "jni/jni_env_ext.h"
66 #include "jni/jni_internal.h"
67 #include "jvalue-inl.h"
68 #include "jvalue.h"
69 #include "jvmti.h"
70 #include "mirror/class.h"
71 #include "mirror/dex_cache.h"
72 #include "nativehelper/scoped_local_ref.h"
73 #include "scoped_thread_state_change-inl.h"
74 #include "scoped_thread_state_change.h"
75 #include "stack.h"
76 #include "thread-current-inl.h"
77 #include "thread.h"
78 #include "thread_list.h"
79 #include "thread_pool.h"
80 #include "thread_state.h"
81 #include "ti_logging.h"
82 #include "ti_thread.h"
83 #include "well_known_classes-inl.h"
84 
85 namespace openjdkjvmti {
86 
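// Visits the Java frames of a thread, skipping runtime methods. The first |start| frames are
// skipped; after that |fn| is called with a jvmtiFrameInfo (jmethodID and dex pc, or -1 if there
// is no dex pc) for each frame until |stop| frames have been reported (|stop| == 0 means no
// limit).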
87 template <typename FrameFn>
88 struct GetStackTraceVisitor : public art::StackVisitor {
89   GetStackTraceVisitor(art::Thread* thread_in,
90                        size_t start_,
91                        size_t stop_,
92                        FrameFn fn_)
93       : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
94         fn(fn_),
95         start(start_),
96         stop(stop_) {}
97   GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
98   GetStackTraceVisitor(GetStackTraceVisitor&&) noexcept = default;
99 
100   bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
101     art::ArtMethod* m = GetMethod();
102     if (m->IsRuntimeMethod()) {
103       return true;
104     }
105 
106     if (start == 0) {
107       m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
108       jmethodID id = art::jni::EncodeArtMethod(m);
109 
110       uint32_t dex_pc = GetDexPc(false);
111       jlong dex_location = (dex_pc == art::dex::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);
112 
113       jvmtiFrameInfo info = { id, dex_location };
114       fn(info);
115 
116       if (stop == 1) {
117         return false;  // We're done.
118       } else if (stop > 0) {
119         stop--;
120       }
121     } else {
122       start--;
123     }
124 
125     return true;
126   }
127 
128   FrameFn fn;
129   size_t start;
130   size_t stop;
131 };
132 
133 art::ShadowFrame* FindFrameAtDepthVisitor::GetOrCreateShadowFrame(bool* created_frame) {
134   art::ShadowFrame* cur = GetCurrentShadowFrame();
135   if (cur == nullptr) {
136     *created_frame = true;
137     art::ArtMethod* method = GetMethod();
138     const uint16_t num_regs = method->DexInstructionData().RegistersSize();
139     cur = GetThread()->FindOrCreateDebuggerShadowFrame(GetFrameId(),
140                                                        num_regs,
141                                                        method,
142                                                        GetDexPc());
143     DCHECK(cur != nullptr);
144   } else {
145     *created_frame = false;
146   }
147   return cur;
148 }
149 
150 template <typename FrameFn>
151 GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
152                                                     size_t start,
153                                                     size_t stop,
154                                                     FrameFn fn) {
155   return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
156 }
157 
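// Checkpoint closure that collects the target thread's frames into a vector. start_result and
// stop_result hold the leftover skip/limit counts after the walk, letting the caller detect a
// stack that was shallower than the requested start depth.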
158 struct GetStackTraceVectorClosure : public art::Closure {
159  public:
160   GetStackTraceVectorClosure(size_t start, size_t stop)
161       : start_input(start),
162         stop_input(stop),
163         start_result(0),
164         stop_result(0) {}
165 
166   void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
167     auto frames_fn = [&](jvmtiFrameInfo info) {
168       frames.push_back(info);
169     };
170     auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
171     visitor.WalkStack(/* include_transitions= */ false);
172 
173     start_result = visitor.start;
174     stop_result = visitor.stop;
175   }
176 
177   const size_t start_input;
178   const size_t stop_input;
179 
180   std::vector<jvmtiFrameInfo> frames;
181   size_t start_result;
182   size_t stop_result;
183 };
184 
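// Copies collected frames into the caller-provided frame_buffer. For start_depth >= 0 the
// collected frames are already the requested window and are copied as-is; for a negative
// start_depth the last -start_depth frames (counted from the bottom of the stack) are copied,
// limited by max_frame_count.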
185 static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
186                                        jint start_depth,
187                                        size_t start_result,
188                                        jint max_frame_count,
189                                        jvmtiFrameInfo* frame_buffer,
190                                        jint* count_ptr) {
191   size_t collected_frames = frames.size();
192 
193   // Assume we're here having collected something.
194   DCHECK_GT(max_frame_count, 0);
195 
196   // Frames from the top.
197   if (start_depth >= 0) {
198     if (start_result != 0) {
199       // Not enough frames.
200       return ERR(ILLEGAL_ARGUMENT);
201     }
202     DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
203     if (frames.size() > 0) {
204       memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
205     }
206     *count_ptr = static_cast<jint>(frames.size());
207     return ERR(NONE);
208   }
209 
210   // Frames from the bottom.
211   if (collected_frames < static_cast<size_t>(-start_depth)) {
212     return ERR(ILLEGAL_ARGUMENT);
213   }
214 
215   size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
216   memcpy(frame_buffer,
217          &frames.data()[collected_frames + start_depth],
218          count * sizeof(jvmtiFrameInfo));
219   *count_ptr = static_cast<jint>(count);
220   return ERR(NONE);
221 }
222 
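// Checkpoint closure that writes frames straight into the caller's frame_buffer. Used on the
// fast path where start_depth >= 0, so the number of frames to skip and to copy is known up
// front and no intermediate vector is needed.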
223 struct GetStackTraceDirectClosure : public art::Closure {
224  public:
225   GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
226       : frame_buffer(frame_buffer_),
227         start_input(start),
228         stop_input(stop),
229         index(0) {
230     DCHECK_GE(start_input, 0u);
231   }
232 
233   void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
234     auto frames_fn = [&](jvmtiFrameInfo info) {
235       frame_buffer[index] = info;
236       ++index;
237     };
238     auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
239     visitor.WalkStack(/* include_transitions= */ false);
240   }
241 
242   jvmtiFrameInfo* frame_buffer;
243 
244   const size_t start_input;
245   const size_t stop_input;
246 
247   size_t index = 0;
248 };
249 
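// Backs the JVMTI GetStackTrace call: a non-negative start_depth selects frames starting that
// many frames below the top of the stack, a negative start_depth selects the last -start_depth
// frames counted from the bottom. The frames are collected on the target thread via a
// synchronous checkpoint.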
250 jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env,
251                                     jthread java_thread,
252                                     jint start_depth,
253                                     jint max_frame_count,
254                                     jvmtiFrameInfo* frame_buffer,
255                                     jint* count_ptr) {
256   // It is not great that we have to hold these locks for so long, but it is necessary to ensure
257   // that the thread isn't dying on us.
258   art::ScopedObjectAccess soa(art::Thread::Current());
259   art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
260 
261   art::Thread* thread;
262   jvmtiError thread_error = ERR(INTERNAL);
263   if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
264     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
265     return thread_error;
266   }
267   DCHECK(thread != nullptr);
268 
269   art::ThreadState state = thread->GetState();
270   if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
271     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
272     return ERR(THREAD_NOT_ALIVE);
273   }
274 
275   if (max_frame_count < 0) {
276     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
277     return ERR(ILLEGAL_ARGUMENT);
278   }
279   if (frame_buffer == nullptr || count_ptr == nullptr) {
280     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
281     return ERR(NULL_POINTER);
282   }
283 
284   if (max_frame_count == 0) {
285     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
286     *count_ptr = 0;
287     return ERR(NONE);
288   }
289 
290   if (start_depth >= 0) {
291     // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
292     GetStackTraceDirectClosure closure(frame_buffer,
293                                        static_cast<size_t>(start_depth),
294                                        static_cast<size_t>(max_frame_count));
295     // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
296     if (!thread->RequestSynchronousCheckpoint(&closure)) {
297       return ERR(THREAD_NOT_ALIVE);
298     }
299     *count_ptr = static_cast<jint>(closure.index);
300     if (closure.index == 0) {
301       JVMTI_LOG(INFO, jvmti_env) << "The stack is not large enough for a start_depth of "
302                                  << start_depth << ".";
303       return ERR(ILLEGAL_ARGUMENT);
304     }
305     return ERR(NONE);
306   } else {
307     GetStackTraceVectorClosure closure(0, 0);
308     // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
309     if (!thread->RequestSynchronousCheckpoint(&closure)) {
310       return ERR(THREAD_NOT_ALIVE);
311     }
312 
313     return TranslateFrameVector(closure.frames,
314                                 start_depth,
315                                 closure.start_result,
316                                 max_frame_count,
317                                 frame_buffer,
318                                 count_ptr);
319   }
320 }
321 
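// Checkpoint closure run on every thread for GetAllStackTraces/GetThreadListStackTraces. Each
// thread stores up to stop_input of its frames in storage handed out by |data| and then passes
// the barrier so the requesting thread can wait for all checkpoints to finish.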
322 template <typename Data>
323 struct GetAllStackTracesVectorClosure : public art::Closure {
324   GetAllStackTracesVectorClosure(size_t stop, Data* data_)
325       : barrier(0), stop_input(stop), data(data_) {}
326 
327   void Run(art::Thread* thread) override
328       REQUIRES_SHARED(art::Locks::mutator_lock_)
329       REQUIRES(!data->mutex) {
330     art::Thread* self = art::Thread::Current();
331     Work(thread, self);
332     barrier.Pass(self);
333   }
334 
335   void Work(art::Thread* thread, art::Thread* self)
336       REQUIRES_SHARED(art::Locks::mutator_lock_)
337       REQUIRES(!data->mutex) {
338     // Skip threads that are still starting.
339     if (thread->IsStillStarting()) {
340       return;
341     }
342 
343     std::vector<jvmtiFrameInfo>* thread_frames = data->GetFrameStorageFor(self, thread);
344     if (thread_frames == nullptr) {
345       return;
346     }
347 
348     // Now collect the data.
349     auto frames_fn = [&](jvmtiFrameInfo info) {
350       thread_frames->push_back(info);
351     };
352     auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
353     visitor.WalkStack(/* include_transitions= */ false);
354   }
355 
356   art::Barrier barrier;
357   const size_t stop_input;
358   Data* data;
359 };
360 
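// Runs the frame-collection closure on all threads via a checkpoint and blocks until every
// thread has passed the barrier.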
361 template <typename Data>
362 static void RunCheckpointAndWait(Data* data, size_t max_frame_count)
363     REQUIRES_SHARED(art::Locks::mutator_lock_) {
364   // Note: requires the mutator lock as the checkpoint requires the mutator lock.
365   GetAllStackTracesVectorClosure<Data> closure(max_frame_count, data);
366   // TODO(b/253671779): Replace this use of RunCheckpointUnchecked() with RunCheckpoint(). This is
367   // currently not possible, since the following undesirable call chain (abbreviated here) is then
368   // possible and exercised by current tests: (jvmti) GetAllStackTraces -> <this function> ->
369   // RunCheckpoint -> GetStackTraceVisitor -> EncodeMethodId -> Class::EnsureMethodIds ->
370   // Class::Alloc -> AllocObjectWithAllocator -> potentially suspends, or runs GC, etc. -> CHECK
371   // failure.
372   size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpointUnchecked(&closure);
373   if (barrier_count == 0) {
374     return;
375   }
376   art::Thread* self = art::Thread::Current();
377   art::ScopedThreadStateChange tsc(self, art::ThreadState::kWaitingForCheckPointsToRun);
378   closure.barrier.Increment(self, barrier_count);
379 }
380 
381 jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
382                                         jint max_frame_count,
383                                         jvmtiStackInfo** stack_info_ptr,
384                                         jint* thread_count_ptr) {
385   if (max_frame_count < 0) {
386     return ERR(ILLEGAL_ARGUMENT);
387   }
388   if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
389     return ERR(NULL_POINTER);
390   }
391 
392   struct AllStackTracesData {
393     AllStackTracesData() : mutex("GetAllStackTraces", art::LockLevel::kAbortLock) {}
394     ~AllStackTracesData() {
395       JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
396       for (jthread global_thread_ref : thread_peers) {
397         jni_env->DeleteGlobalRef(global_thread_ref);
398       }
399     }
400 
401     std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
402         REQUIRES_SHARED(art::Locks::mutator_lock_)
403         REQUIRES(!mutex) {
404       art::MutexLock mu(self, mutex);
405 
406       threads.push_back(thread);
407 
408       jthread peer = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(
409           self, thread->GetPeerFromOtherThread());
410       thread_peers.push_back(peer);
411 
412       frames.emplace_back(new std::vector<jvmtiFrameInfo>());
413       return frames.back().get();
414     }
415 
416     art::Mutex mutex;
417 
418     // Storage. Only access directly after completion.
419 
420     std::vector<art::Thread*> threads;
421     // "thread_peers" contains global references to their peers.
422     std::vector<jthread> thread_peers;
423 
424     std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
425   };
426 
427   AllStackTracesData data;
428   art::Thread* current = art::Thread::Current();
429   {
430     art::ScopedObjectAccess soa(current);
431     RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));
432   }
433 
434   // Convert the data into our output format.
435 
436   // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
437   //       allocate one big chunk for this and the actual frames, which means we need
438   //       to either be conservative or rearrange things later (the latter is implemented).
439   std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
440   std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
441   frame_infos.reserve(data.frames.size());
442 
443   // Now run through and add data for each thread.
444   size_t sum_frames = 0;
445   for (size_t index = 0; index < data.frames.size(); ++index) {
446     jvmtiStackInfo& stack_info = stack_info_array.get()[index];
447     memset(&stack_info, 0, sizeof(jvmtiStackInfo));
448 
449     const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();
450 
451     // For the time being, set the thread to null. We'll fix it up in the second stage.
452     stack_info.thread = nullptr;
453     stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;
454 
455     size_t collected_frames = thread_frames.size();
456     if (max_frame_count == 0 || collected_frames == 0) {
457       stack_info.frame_count = 0;
458       stack_info.frame_buffer = nullptr;
459       continue;
460     }
461     DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
462 
463     jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
464     frame_infos.emplace_back(frame_info);
465 
466     jint count;
467     jvmtiError translate_result = TranslateFrameVector(thread_frames,
468                                                        0,
469                                                        0,
470                                                        static_cast<jint>(collected_frames),
471                                                        frame_info,
472                                                        &count);
473     DCHECK(translate_result == JVMTI_ERROR_NONE);
474     stack_info.frame_count = static_cast<jint>(collected_frames);
475     stack_info.frame_buffer = frame_info;
476     sum_frames += static_cast<size_t>(count);
477   }
478 
479   // No errors, yet. Now put it all into an output buffer.
480   size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * data.frames.size(),
481                                                 alignof(jvmtiFrameInfo));
482   size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
483   unsigned char* chunk_data;
484   jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
485   if (alloc_result != ERR(NONE)) {
486     return alloc_result;
487   }
488 
489   jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
490   // First copy in all the basic data.
491   memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * data.frames.size());
492 
493   // Now copy the frames and fix up the pointers.
494   jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
495       chunk_data + rounded_stack_info_size);
496   for (size_t i = 0; i < data.frames.size(); ++i) {
497     jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
498     jvmtiStackInfo& new_stack_info = stack_info[i];
499 
500     // Translate the global ref into a local ref.
501     new_stack_info.thread =
502         static_cast<JNIEnv*>(current->GetJniEnv())->NewLocalRef(data.thread_peers[i]);
503 
504     if (old_stack_info.frame_count > 0) {
505       // Only copy when there's data - leave the nullptr alone.
506       size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
507       memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
508       new_stack_info.frame_buffer = frame_info;
509       frame_info += old_stack_info.frame_count;
510     }
511   }
512 
513   *stack_info_ptr = stack_info;
514   *thread_count_ptr = static_cast<jint>(data.frames.size());
515 
516   return ERR(NONE);
517 }
518 
519 jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
520                                                jint thread_count,
521                                                const jthread* thread_list,
522                                                jint max_frame_count,
523                                                jvmtiStackInfo** stack_info_ptr) {
524   if (max_frame_count < 0) {
525     return ERR(ILLEGAL_ARGUMENT);
526   }
527   if (thread_count < 0) {
528     return ERR(ILLEGAL_ARGUMENT);
529   }
530   if (thread_count == 0) {
531     *stack_info_ptr = nullptr;
532     return ERR(NONE);
533   }
534   if (thread_list == nullptr || stack_info_ptr == nullptr) {
535     return ERR(NULL_POINTER);
536   }
537 
538   art::Thread* current = art::Thread::Current();
539   art::ScopedObjectAccess soa(current);      // Now we know we have the shared lock.
540 
541   struct SelectStackTracesData {
542     SelectStackTracesData() : mutex("GetSelectStackTraces", art::LockLevel::kAbortLock) {}
543 
544     std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
545               REQUIRES_SHARED(art::Locks::mutator_lock_)
546               REQUIRES(!mutex) {
547       art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
548       for (size_t index = 0; index != handles.size(); ++index) {
549         if (peer == handles[index].Get()) {
550           // Found the thread.
551           art::MutexLock mu(self, mutex);
552 
553           thread_list_indices.push_back(index);
554 
555           frames.emplace_back(new std::vector<jvmtiFrameInfo>());
556           return frames.back().get();
557         }
558       }
559       return nullptr;
560     }
561 
562     art::Mutex mutex;
563 
564     // Selection data.
565 
566     std::vector<art::Handle<art::mirror::Object>> handles;
567 
568     // Storage. Only access directly after completion.
569 
570     std::vector<size_t> thread_list_indices;
571 
572     std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
573   };
574 
575   SelectStackTracesData data;
576 
577   // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
578   art::VariableSizedHandleScope hs(current);
579   for (jint i = 0; i != thread_count; ++i) {
580     if (thread_list[i] == nullptr) {
581       return ERR(INVALID_THREAD);
582     }
583     art::ObjPtr<art::mirror::Object> thread = soa.Decode<art::mirror::Object>(thread_list[i]);
584     if (!thread->InstanceOf(art::WellKnownClasses::java_lang_Thread.Get())) {
585       return ERR(INVALID_THREAD);
586     }
587     data.handles.push_back(hs.NewHandle(thread));
588   }
589 
590   RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));
591 
592   // Convert the data into our output format.
593 
594   // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
595   //       allocate one big chunk for this and the actual frames, which means we need
596   //       to either be conservative or rearrange things later (the latter is implemented).
597   std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
598   std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
599   frame_infos.reserve(data.frames.size());
600 
601   // Now run through and add data for each thread.
602   size_t sum_frames = 0;
603   for (size_t index = 0; index < data.frames.size(); ++index) {
604     jvmtiStackInfo& stack_info = stack_info_array.get()[index];
605     memset(&stack_info, 0, sizeof(jvmtiStackInfo));
606 
607     const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();
608 
609     // For the time being, set the thread to null. We don't have good ScopedLocalRef
610     // infrastructure.
611     stack_info.thread = nullptr;
612     stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;
613 
614     size_t collected_frames = thread_frames.size();
615     if (max_frame_count == 0 || collected_frames == 0) {
616       stack_info.frame_count = 0;
617       stack_info.frame_buffer = nullptr;
618       continue;
619     }
620     DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
621 
622     jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
623     frame_infos.emplace_back(frame_info);
624 
625     jint count;
626     jvmtiError translate_result = TranslateFrameVector(thread_frames,
627                                                        0,
628                                                        0,
629                                                        static_cast<jint>(collected_frames),
630                                                        frame_info,
631                                                        &count);
632     DCHECK(translate_result == JVMTI_ERROR_NONE);
633     stack_info.frame_count = static_cast<jint>(collected_frames);
634     stack_info.frame_buffer = frame_info;
635     sum_frames += static_cast<size_t>(count);
636   }
637 
638   // No errors, yet. Now put it all into an output buffer. Note that the buffer is sized by
639   // thread_count, which may be larger than data.frames.size().
640   size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
641                                                 alignof(jvmtiFrameInfo));
642   size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
643   unsigned char* chunk_data;
644   jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
645   if (alloc_result != ERR(NONE)) {
646     return alloc_result;
647   }
648 
649   jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
650   jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
651       chunk_data + rounded_stack_info_size);
652 
653   for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
654     // Check whether we found a running thread for this.
655     // Note: For simplicity, and with the expectation that the list is usually small, use a simple
656     //       search. (The list is *not* sorted!)
657     auto it = std::find(data.thread_list_indices.begin(), data.thread_list_indices.end(), i);
658     if (it == data.thread_list_indices.end()) {
659       // No native thread. Must be new or dead. We need to fill out the stack info now.
660       // (Need to read the Java "started" field to know whether this is starting or terminated.)
661       art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
662       art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
663       art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
664       CHECK(started_field != nullptr);
665       bool started = started_field->GetBoolean(peer) != 0;
666       constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
667       constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
668           JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
669       stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
670       stack_info[i].state = started ? kTerminatedState : kStartedState;
671       stack_info[i].frame_count = 0;
672       stack_info[i].frame_buffer = nullptr;
673     } else {
674       // Had a native thread and frames.
675       size_t f_index = it - data.thread_list_indices.begin();
676 
677       jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
678       jvmtiStackInfo& new_stack_info = stack_info[i];
679 
680       memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
681       new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
682       if (old_stack_info.frame_count > 0) {
683         // Only copy when there's data - leave the nullptr alone.
684         size_t frames_size =
685             static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
686         memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
687         new_stack_info.frame_buffer = frame_info;
688         frame_info += old_stack_info.frame_count;
689       }
690     }
691   }
692 
693   *stack_info_ptr = stack_info;
694 
695   return ERR(NONE);
696 }
697 
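// Checkpoint closure that counts the Java frames on the target thread's stack, ignoring runtime
// methods and transitions.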
698 struct GetFrameCountClosure : public art::Closure {
699  public:
700   GetFrameCountClosure() : count(0) {}
701 
702   void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
703     // This is not StackVisitor::ComputeNumFrames, as runtime methods and transitions must not be
704     // counted.
705     art::StackVisitor::WalkStack(
706         [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
707           art::ArtMethod* m = stack_visitor->GetMethod();
708           if (m != nullptr && !m->IsRuntimeMethod()) {
709             count++;
710           }
711           return true;
712         },
713         self,
714         /* context= */ nullptr,
715         art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
716   }
717 
718   size_t count;
719 };
720 
721 jvmtiError StackUtil::GetFrameCount([[maybe_unused]] jvmtiEnv* env,
722                                     jthread java_thread,
723                                     jint* count_ptr) {
724   // It is not great that we have to hold these locks for so long, but it is necessary to ensure
725   // that the thread isn't dying on us.
726   art::ScopedObjectAccess soa(art::Thread::Current());
727   art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
728 
729   art::Thread* thread;
730   jvmtiError thread_error = ERR(INTERNAL);
731   if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
732     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
733     return thread_error;
734   }
735 
736   DCHECK(thread != nullptr);
737   art::ThreadState state = thread->GetState();
738   if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
739     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
740     return ERR(THREAD_NOT_ALIVE);
741   }
742 
743   if (count_ptr == nullptr) {
744     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
745     return ERR(NULL_POINTER);
746   }
747 
748   GetFrameCountClosure closure;
749   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
750   if (!thread->RequestSynchronousCheckpoint(&closure)) {
751     return ERR(THREAD_NOT_ALIVE);
752   }
753 
754   *count_ptr = closure.count;
755   return ERR(NONE);
756 }
757 
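// Checkpoint closure that records the method and dex pc of the frame at depth |n| on the target
// thread's stack, ignoring runtime methods.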
758 struct GetLocationClosure : public art::Closure {
759  public:
760   explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}
761 
762   void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
763     // Walks up the stack 'n' callers.
764     size_t count = 0u;
765     art::StackVisitor::WalkStack(
766         [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
767           art::ArtMethod* m = stack_visitor->GetMethod();
768           if (m != nullptr && !m->IsRuntimeMethod()) {
769             DCHECK(method == nullptr);
770             if (count == n) {
771               method = m;
772               dex_pc = stack_visitor->GetDexPc(/*abort_on_failure=*/false);
773               return false;
774             }
775             count++;
776           }
777           return true;
778         },
779         self,
780         /* context= */ nullptr,
781         art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
782   }
783 
784   const size_t n;
785   art::ArtMethod* method;
786   uint32_t dex_pc;
787 };
788 
789 jvmtiError StackUtil::GetFrameLocation([[maybe_unused]] jvmtiEnv* env,
790                                        jthread java_thread,
791                                        jint depth,
792                                        jmethodID* method_ptr,
793                                        jlocation* location_ptr) {
794   // It is not great that we have to hold these locks for so long, but it is necessary to ensure
795   // that the thread isn't dying on us.
796   art::ScopedObjectAccess soa(art::Thread::Current());
797   art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
798 
799   art::Thread* thread;
800   jvmtiError thread_error = ERR(INTERNAL);
801   if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
802     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
803     return thread_error;
804   }
805   DCHECK(thread != nullptr);
806 
807   art::ThreadState state = thread->GetState();
808   if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
809     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
810     return ERR(THREAD_NOT_ALIVE);
811   }
812 
813   if (depth < 0) {
814     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
815     return ERR(ILLEGAL_ARGUMENT);
816   }
817   if (method_ptr == nullptr || location_ptr == nullptr) {
818     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
819     return ERR(NULL_POINTER);
820   }
821 
822   GetLocationClosure closure(static_cast<size_t>(depth));
823   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
824   if (!thread->RequestSynchronousCheckpoint(&closure)) {
825     return ERR(THREAD_NOT_ALIVE);
826   }
827 
828   if (closure.method == nullptr) {
829     return ERR(NO_MORE_FRAMES);
830   }
831 
832   *method_ptr = art::jni::EncodeArtMethod(closure.method);
833   if (closure.method->IsNative() || closure.method->IsProxyMethod()) {
834     *location_ptr = -1;
835   } else {
836     if (closure.dex_pc == art::dex::kDexNoIndex) {
837       return ERR(INTERNAL);
838     }
839     *location_ptr = static_cast<jlocation>(closure.dex_pc);
840   }
841 
842   return ERR(NONE);
843 }
844 
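// Collects the monitors owned by a thread: monitors locked in Java frames are found via
// Monitor::VisitLocks (recorded together with the stack depth at which they are held), and
// monitors entered through JNI are found via the thread's monitor roots (recorded with depth -1).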
845 struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor {
846   // We need a context because VisitLocks needs it to retrieve the monitor objects.
847   explicit MonitorVisitor(art::Thread* thread)
848       REQUIRES_SHARED(art::Locks::mutator_lock_)
849       : art::StackVisitor(thread,
850                           art::Context::Create(),
851                           art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
852         hs(art::Thread::Current()),
853         current_stack_depth(0) {}
854 
855   ~MonitorVisitor() {
856     delete context_;
857   }
858 
859   bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
860     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
861     if (!GetMethod()->IsRuntimeMethod()) {
862       art::Monitor::VisitLocks(this, AppendOwnedMonitors, this);
863       ++current_stack_depth;
864     }
865     return true;
866   }
867 
868   static void AppendOwnedMonitors(art::ObjPtr<art::mirror::Object> owned_monitor, void* arg)
869       REQUIRES_SHARED(art::Locks::mutator_lock_) {
870     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
871     MonitorVisitor* visitor = reinterpret_cast<MonitorVisitor*>(arg);
872     // Filter out duplicates.
873     for (const art::Handle<art::mirror::Object>& monitor : visitor->monitors) {
874       if (monitor.Get() == owned_monitor) {
875         return;
876       }
877     }
878     visitor->monitors.push_back(visitor->hs.NewHandle(owned_monitor));
879     visitor->stack_depths.push_back(visitor->current_stack_depth);
880   }
881 
882   void VisitRoot(art::mirror::Object* obj, [[maybe_unused]] const art::RootInfo& info) override
883       REQUIRES_SHARED(art::Locks::mutator_lock_) {
884     for (const art::Handle<art::mirror::Object>& m : monitors) {
885       if (m.Get() == obj) {
886         return;
887       }
888     }
889     monitors.push_back(hs.NewHandle(obj));
890     stack_depths.push_back(-1);
891   }
892 
893   art::VariableSizedHandleScope hs;
894   jint current_stack_depth;
895   std::vector<art::Handle<art::mirror::Object>> monitors;
896   std::vector<jint> stack_depths;
897 };
898 
899 template<typename Fn>
900 struct MonitorInfoClosure : public art::Closure {
901  public:
902   explicit MonitorInfoClosure(Fn handle_results)
903       : err_(OK), handle_results_(handle_results) {}
904 
905   void Run(art::Thread* target) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
906     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
907     // Find the monitors on the stack.
908     MonitorVisitor visitor(target);
909     visitor.WalkStack(/* include_transitions= */ false);
910     // Find any other monitors, including ones acquired in native code.
911     art::RootInfo root_info(art::kRootVMInternal);
912     target->GetJniEnv()->VisitMonitorRoots(&visitor, root_info);
913     err_ = handle_results_(visitor);
914   }
915 
916   jvmtiError GetError() {
917     return err_;
918   }
919 
920  private:
921   jvmtiError err_;
922   Fn handle_results_;
923 };
924 
925 
926 template <typename Fn>
927 static jvmtiError GetOwnedMonitorInfoCommon(const art::ScopedObjectAccessAlreadyRunnable& soa,
928                                             jthread thread,
929                                             Fn handle_results)
930     REQUIRES_SHARED(art::Locks::mutator_lock_) {
931   art::Thread* self = art::Thread::Current();
932   MonitorInfoClosure<Fn> closure(handle_results);
933   bool called_method = false;
934   {
935     art::Locks::thread_list_lock_->ExclusiveLock(self);
936     art::Thread* target = nullptr;
937     jvmtiError err = ERR(INTERNAL);
938     if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
939       art::Locks::thread_list_lock_->ExclusiveUnlock(self);
940       return err;
941     }
942     if (target != self) {
943       called_method = true;
944       // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
945       // Since this deals with object references we need to avoid going to sleep.
946       art::ScopedAssertNoThreadSuspension sants("Getting owned monitor usage");
947       if (!target->RequestSynchronousCheckpoint(&closure, art::ThreadState::kRunnable)) {
948         return ERR(THREAD_NOT_ALIVE);
949       }
950     } else {
951       art::Locks::thread_list_lock_->ExclusiveUnlock(self);
952     }
953   }
954   // We cannot run the closure on the current thread while holding the thread_list_lock, since the
955   // closure may call into the verifier, which can cause the current thread to suspend for GC.
956   // Suspending while holding the ThreadListLock would be bad. Other threads are fine because the
957   // closure runs on a checkpoint, but for the current thread we must drop the mutex first.
958   if (!called_method) {
959     closure.Run(self);
960   }
961   return closure.GetError();
962 }
963 
964 jvmtiError StackUtil::GetOwnedMonitorStackDepthInfo(jvmtiEnv* env,
965                                                     jthread thread,
966                                                     jint* info_cnt,
967                                                     jvmtiMonitorStackDepthInfo** info_ptr) {
968   if (info_cnt == nullptr || info_ptr == nullptr) {
969     return ERR(NULL_POINTER);
970   }
971   art::ScopedObjectAccess soa(art::Thread::Current());
972   std::vector<art::GcRoot<art::mirror::Object>> mons;
973   std::vector<uint32_t> depths;
974   auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
975     for (size_t i = 0; i < visitor.monitors.size(); i++) {
976       mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
977       depths.push_back(visitor.stack_depths[i]);
978     }
979     return OK;
980   };
981   jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
982   if (err != OK) {
983     return err;
984   }
985   auto nbytes = sizeof(jvmtiMonitorStackDepthInfo) * mons.size();
986   err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(info_ptr));
987   if (err != OK) {
988     return err;
989   }
990   *info_cnt = mons.size();
991   for (uint32_t i = 0; i < mons.size(); i++) {
992     (*info_ptr)[i] = {
993       soa.AddLocalReference<jobject>(mons[i].Read()),
994       static_cast<jint>(depths[i])
995     };
996   }
997   return err;
998 }
999 
1000 jvmtiError StackUtil::GetOwnedMonitorInfo(jvmtiEnv* env,
1001                                           jthread thread,
1002                                           jint* owned_monitor_count_ptr,
1003                                           jobject** owned_monitors_ptr) {
1004   if (owned_monitor_count_ptr == nullptr || owned_monitors_ptr == nullptr) {
1005     return ERR(NULL_POINTER);
1006   }
1007   art::ScopedObjectAccess soa(art::Thread::Current());
1008   std::vector<art::GcRoot<art::mirror::Object>> mons;
1009   auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
1010     for (size_t i = 0; i < visitor.monitors.size(); i++) {
1011       mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
1012     }
1013     return OK;
1014   };
1015   jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
1016   if (err != OK) {
1017     return err;
1018   }
1019   auto nbytes = sizeof(jobject) * mons.size();
1020   err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(owned_monitors_ptr));
1021   if (err != OK) {
1022     return err;
1023   }
1024   *owned_monitor_count_ptr = mons.size();
1025   for (uint32_t i = 0; i < mons.size(); i++) {
1026     (*owned_monitors_ptr)[i] = soa.AddLocalReference<jobject>(mons[i].Read());
1027   }
1028   return err;
1029 }
1030 
1031 jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
1032   if (depth < 0) {
1033     return ERR(ILLEGAL_ARGUMENT);
1034   }
1035   ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
1036   art::Thread* self = art::Thread::Current();
1037   art::Thread* target;
1038 
1039   ScopedNoUserCodeSuspension snucs(self);
1040   // From now on we know we cannot get suspended by user-code.
1041   // NB This does a SuspendCheck (during thread state change) so we need to make
1042   // sure we don't have the 'suspend_lock' locked here.
1043   art::ScopedObjectAccess soa(self);
1044   art::Locks::thread_list_lock_->ExclusiveLock(self);
1045   jvmtiError err = ERR(INTERNAL);
1046   if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
1047     art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1048     return err;
1049   }
1050   if (target != self) {
1051     // TODO This is part of the spec but we could easily avoid needing to do it.
1052     // We would just put all the logic into a sync-checkpoint.
1053     art::Locks::thread_suspend_count_lock_->ExclusiveLock(self);
1054     if (target->GetUserCodeSuspendCount() == 0) {
1055       art::Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
1056       art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1057       return ERR(THREAD_NOT_SUSPENDED);
1058     }
1059     art::Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
1060   }
1061   // We hold the user_code_suspension_lock_ so the target thread is staying
1062   // suspended until we are done (unless it's 'self' in which case we don't care
1063   // since we aren't going to be returning).
1064   // TODO We could implement this using a synchronous checkpoint and not bother
1065   // with any of the suspension stuff. The spec does specifically say to return
1066   // THREAD_NOT_SUSPENDED though. Find the requested stack frame.
1067   std::unique_ptr<art::Context> context(art::Context::Create());
1068   FindFrameAtDepthVisitor visitor(target, context.get(), depth);
1069   visitor.WalkStack();
1070   if (!visitor.FoundFrame()) {
1071     art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1072     return ERR(NO_MORE_FRAMES);
1073   }
1074   art::ArtMethod* method = visitor.GetMethod();
1075   if (method->IsNative()) {
1076     art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1077     return ERR(OPAQUE_FRAME);
1078   }
1079   // From here we are sure to succeed.
1080   bool needs_instrument = false;
1081   // Get/create a shadow frame
1082   art::ShadowFrame* shadow_frame =
1083       visitor.GetOrCreateShadowFrame(&needs_instrument);
1084   {
1085     art::WriterMutexLock lk(self, tienv->event_info_mutex_);
1086     if (LIKELY(!shadow_frame->NeedsNotifyPop())) {
1087       // Ensure we won't miss exceptions being thrown if we get jit-compiled. We
1088       // only do this for the first NotifyPopFrame.
1089       target->IncrementForceInterpreterCount();
1090 
1091       // Mark shadow frame as needs_notify_pop_
1092       shadow_frame->SetNotifyPop(true);
1093     }
1094     tienv->notify_frames.insert(shadow_frame);
1095   }
1096   // Make sure we will go to the interpreter and use the shadow frames.
1097   if (needs_instrument) {
1098     art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
1099       DeoptManager::Get()->DeoptimizeThread(self);
1100     });
1101     target->RequestSynchronousCheckpoint(&fc);
1102   } else {
1103     art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1104   }
1105   return OK;
1106 }
1107 
1108 namespace {
1109 
1110 enum class NonStandardExitType {
1111   kPopFrame,
1112   kForceReturn,
1113 };
1114 
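// Helper that prepares a thread for a non-standard exit (PopFrame or ForceEarlyReturn): it
// prevents user-code suspension, checks that the target thread is suspended, locates the top two
// frames, and materializes shadow frames for them. Any failure is recorded in result_.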
1115 template<NonStandardExitType kExitType>
1116 class NonStandardExitFrames {
1117  public:
1118   NonStandardExitFrames(art::Thread* self, jvmtiEnv* env, jthread thread)
1119       REQUIRES(!art::Locks::thread_suspend_count_lock_)
1120       ACQUIRE_SHARED(art::Locks::mutator_lock_)
1121       ACQUIRE(art::Locks::thread_list_lock_, art::Locks::user_code_suspension_lock_)
1122       : snucs_(self) {
1123     // We keep the user-code-suspend-count lock.
1124     art::Locks::user_code_suspension_lock_->AssertExclusiveHeld(self);
1125 
1126     // From now on we know we cannot get suspended by user-code.
1127     // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
1128     // have the 'suspend_lock' locked here.
1129     old_state_ = self->TransitionFromSuspendedToRunnable();
1130     art::ScopedObjectAccessUnchecked soau(self);
1131 
1132     art::Locks::thread_list_lock_->ExclusiveLock(self);
1133 
1134     if (!ThreadUtil::GetAliveNativeThread(thread, soau, &target_, &result_)) {
1135       return;
1136     }
1137     {
1138       art::MutexLock tscl_mu(self, *art::Locks::thread_suspend_count_lock_);
1139       if (target_ != self && target_->GetUserCodeSuspendCount() == 0) {
1140         // We cannot be the current thread for this function.
1141         result_ = ERR(THREAD_NOT_SUSPENDED);
1142         return;
1143       }
1144     }
1145     JvmtiGlobalTLSData* tls_data = ThreadUtil::GetGlobalTLSData(target_);
1146     constexpr art::StackVisitor::StackWalkKind kWalkKind =
1147         art::StackVisitor::StackWalkKind::kIncludeInlinedFrames;
1148     if (tls_data != nullptr &&
1149         tls_data->disable_pop_frame_depth != JvmtiGlobalTLSData::kNoDisallowedPopFrame &&
1150         tls_data->disable_pop_frame_depth ==
1151             art::StackVisitor::ComputeNumFrames(target_, kWalkKind)) {
1152       JVMTI_LOG(WARNING, env) << "Disallowing frame pop due to in-progress class-load/prepare. "
1153                               << "Frame at depth " << tls_data->disable_pop_frame_depth << " was "
1154                               << "marked as un-poppable by the jvmti plugin. See b/117615146 for "
1155                               << "more information.";
1156       result_ = ERR(OPAQUE_FRAME);
1157       return;
1158     }
1159     // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we are
1160     // done.
1161     std::unique_ptr<art::Context> context(art::Context::Create());
1162     FindFrameAtDepthVisitor final_frame(target_, context.get(), 0);
1163     FindFrameAtDepthVisitor penultimate_frame(target_, context.get(), 1);
1164     final_frame.WalkStack();
1165     penultimate_frame.WalkStack();
1166 
1167     if (!final_frame.FoundFrame() || !penultimate_frame.FoundFrame()) {
1168       // Cannot do it if there is only one frame!
1169       JVMTI_LOG(INFO, env) << "Can not pop final frame off of a stack";
1170       result_ = ERR(NO_MORE_FRAMES);
1171       return;
1172     }
1173 
1174     art::ArtMethod* called_method = final_frame.GetMethod();
1175     art::ArtMethod* calling_method = penultimate_frame.GetMethod();
1176     if (!CheckFunctions(env, calling_method, called_method)) {
1177       return;
1178     }
1179     DCHECK(!called_method->IsNative()) << called_method->PrettyMethod();
1180 
1181     // From here we are sure to succeed.
1182     result_ = OK;
1183 
1184     // Get/create a shadow frame
1185     final_frame_ = final_frame.GetOrCreateShadowFrame(&created_final_frame_);
1186     penultimate_frame_ =
1187         (calling_method->IsNative()
1188              ? nullptr
1189              : penultimate_frame.GetOrCreateShadowFrame(&created_penultimate_frame_));
1190 
1191     final_frame_id_ = final_frame.GetFrameId();
1192     penultimate_frame_id_ = penultimate_frame.GetFrameId();
1193 
1194     CHECK_NE(final_frame_, penultimate_frame_) << "Frames at different depths not different!";
1195   }
1196 
1197   bool CheckFunctions(jvmtiEnv* env, art::ArtMethod* calling, art::ArtMethod* called)
1198       REQUIRES(art::Locks::thread_list_lock_, art::Locks::user_code_suspension_lock_)
1199       REQUIRES_SHARED(art::Locks::mutator_lock_);
1200 
1201   ~NonStandardExitFrames() RELEASE_SHARED(art::Locks::mutator_lock_)
1202       REQUIRES(!art::Locks::thread_list_lock_)
1203       RELEASE(art::Locks::user_code_suspension_lock_) {
1204     art::Thread* self = art::Thread::Current();
1205     DCHECK_EQ(old_state_, art::ThreadState::kNative)
1206         << "Unexpected thread state on entering PopFrame!";
1207     self->TransitionFromRunnableToSuspended(old_state_);
1208   }
1209 
1210   ScopedNoUserCodeSuspension snucs_;
1211   art::ShadowFrame* final_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = nullptr;
1212   art::ShadowFrame* penultimate_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = nullptr;
1213   bool created_final_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = false;
1214   bool created_penultimate_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = false;
1215   uint32_t final_frame_id_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = -1;
1216   uint32_t penultimate_frame_id_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = -1;
1217   art::Thread* target_ GUARDED_BY(art::Locks::thread_list_lock_) = nullptr;
1218   art::ThreadState old_state_ = art::ThreadState::kTerminated;
1219   jvmtiError result_ = ERR(INTERNAL);
1220 };
1221 
1222 template <>
1223 bool NonStandardExitFrames<NonStandardExitType::kForceReturn>::CheckFunctions(
1224     jvmtiEnv* env, [[maybe_unused]] art::ArtMethod* calling, art::ArtMethod* called) {
1225   if (UNLIKELY(called->IsNative())) {
1226     result_ = ERR(OPAQUE_FRAME);
1227     JVMTI_LOG(INFO, env) << "Cannot force early return from " << called->PrettyMethod()
1228                          << " because it is native.";
1229     return false;
1230   } else {
1231     return true;
1232   }
1233 }
1234 
1235 template <>
1236 bool NonStandardExitFrames<NonStandardExitType::kPopFrame>::CheckFunctions(
1237     jvmtiEnv* env, art::ArtMethod* calling, art::ArtMethod* called) {
1238   if (UNLIKELY(calling->IsNative() || called->IsNative())) {
1239     result_ = ERR(OPAQUE_FRAME);
1240     JVMTI_LOG(INFO, env) << "Cannot force early return from " << called->PrettyMethod() << " to "
1241                          << calling->PrettyMethod() << " because at least one of them is native.";
1242     return false;
1243   } else {
1244     return true;
1245   }
1246 }
1247 
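// RAII helper that enables the internal ForceEarlyReturnUpdateReturnValue event for the target
// thread and disables it again on destruction if NotifyFailure() was called.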
1248 class SetupMethodExitEvents {
1249  public:
1250   SetupMethodExitEvents(art::Thread* self,
1251                         EventHandler* event_handler,
1252                         jthread target) REQUIRES(!art::Locks::mutator_lock_,
1253                                                  !art::Locks::user_code_suspension_lock_,
1254                                                  !art::Locks::thread_list_lock_)
1255       : self_(self), event_handler_(event_handler), target_(target) {
1256     DCHECK(target != nullptr);
1257     art::Locks::mutator_lock_->AssertNotHeld(self_);
1258     art::Locks::user_code_suspension_lock_->AssertNotHeld(self_);
1259     art::Locks::thread_list_lock_->AssertNotHeld(self_);
1260     event_handler_->SetInternalEvent(
1261         target_, ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue, JVMTI_ENABLE);
1262   }
1263 
1264   ~SetupMethodExitEvents() REQUIRES(!art::Locks::mutator_lock_,
1265                                     !art::Locks::user_code_suspension_lock_,
1266                                     !art::Locks::thread_list_lock_) {
1267     art::Locks::mutator_lock_->AssertNotHeld(self_);
1268     art::Locks::user_code_suspension_lock_->AssertNotHeld(self_);
1269     art::Locks::thread_list_lock_->AssertNotHeld(self_);
1270     if (failed_) {
1271       event_handler_->SetInternalEvent(
1272           target_, ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue, JVMTI_DISABLE);
1273     }
1274   }
1275 
1276   void NotifyFailure() {
1277     failed_ = true;
1278   }
1279 
1280  private:
1281   art::Thread* self_;
1282   EventHandler* event_handler_;
1283   jthread target_;
1284   bool failed_ = false;
1285 };
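
// SetupMethodExitEvents above is a small RAII guard: the constructor enables the internal
// kForceEarlyReturnUpdateReturnValue event for |target| and the destructor disables it again,
// but only if NotifyFailure() was called, so on the success path the event stays enabled until
// the forced return value has been delivered. A rough sketch of the intended call pattern
// (illustrative only, mirroring ForceEarlyReturn below):
//
//   SetupMethodExitEvents smee(self, event_handler, thread);
//   NonStandardExitFrames<NonStandardExitType::kForceReturn> frames(self, env, thread);
//   if (frames.result_ != OK) {
//     smee.NotifyFailure();  // the destructor will disable the event again
//     return frames.result_;
//   }
//   // ... queue the delayed method-exit event and return OK with the event left enabled.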

template <typename T>
void AddDelayedMethodExitEvent(EventHandler* handler, art::ShadowFrame* frame, T value)
    REQUIRES_SHARED(art::Locks::mutator_lock_)
    REQUIRES(art::Locks::user_code_suspension_lock_, art::Locks::thread_list_lock_);

template <typename T>
void AddDelayedMethodExitEvent(EventHandler* handler, art::ShadowFrame* frame, T value) {
  art::JValue val = art::JValue::FromPrimitive(value);
  jvalue jval{ .j = val.GetJ() };
  handler->AddDelayedNonStandardExitEvent(frame, false, jval);
}

template <>
void AddDelayedMethodExitEvent<std::nullptr_t>(EventHandler* handler,
                                               art::ShadowFrame* frame,
                                               [[maybe_unused]] std::nullptr_t null_val) {
  jvalue jval;
  memset(&jval, 0, sizeof(jval));
  handler->AddDelayedNonStandardExitEvent(frame, false, jval);
}

template <>
void AddDelayedMethodExitEvent<jobject>(EventHandler* handler,
                                        art::ShadowFrame* frame,
                                        jobject obj) {
  jvalue jval{ .l = art::Thread::Current()->GetJniEnv()->NewGlobalRef(obj) };
  handler->AddDelayedNonStandardExitEvent(frame, true, jval);
}
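
// The AddDelayedMethodExitEvent template and its two specializations above cover every kind of
// forced return value: primitives go through the generic template as a 64-bit-widened jvalue,
// std::nullptr_t stands in for a void return and passes a zeroed jvalue, and jobject values are
// promoted to a new global reference so the object stays alive until the delayed MethodExit
// event is dispatched. Illustrative call sites (hypothetical, not taken from this file):
//
//   AddDelayedMethodExitEvent<jint>(handler, frame, 42);                 // primitive path
//   AddDelayedMethodExitEvent<std::nullptr_t>(handler, frame, nullptr);  // void path
//   AddDelayedMethodExitEvent<jobject>(handler, frame, obj);             // reference path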

template <typename T>
bool ValidReturnType(art::Thread* self, art::ObjPtr<art::mirror::Class> return_type, T value)
    REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(art::Locks::user_code_suspension_lock_, art::Locks::thread_list_lock_);

#define SIMPLE_VALID_RETURN_TYPE(type, ...)                                                       \
  template <>                                                                                     \
  bool ValidReturnType<type>([[maybe_unused]] art::Thread * self,                                 \
                             art::ObjPtr<art::mirror::Class> return_type,                         \
                             [[maybe_unused]] type value) {                                       \
    static constexpr std::initializer_list<art::Primitive::Type> types{__VA_ARGS__};              \
    return std::find(types.begin(), types.end(), return_type->GetPrimitiveType()) != types.end(); \
  }

SIMPLE_VALID_RETURN_TYPE(jlong, art::Primitive::kPrimLong);
SIMPLE_VALID_RETURN_TYPE(jfloat, art::Primitive::kPrimFloat);
SIMPLE_VALID_RETURN_TYPE(jdouble, art::Primitive::kPrimDouble);
SIMPLE_VALID_RETURN_TYPE(std::nullptr_t, art::Primitive::kPrimVoid);
SIMPLE_VALID_RETURN_TYPE(jint,
                         art::Primitive::kPrimInt,
                         art::Primitive::kPrimChar,
                         art::Primitive::kPrimBoolean,
                         art::Primitive::kPrimShort,
                         art::Primitive::kPrimByte);
#undef SIMPLE_VALID_RETURN_TYPE
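
// For reference, SIMPLE_VALID_RETURN_TYPE(jlong, art::Primitive::kPrimLong) above expands to
// roughly the following specialization, which just checks that the callee's declared primitive
// return type is one of the listed kinds:
//
//   template <>
//   bool ValidReturnType<jlong>([[maybe_unused]] art::Thread* self,
//                               art::ObjPtr<art::mirror::Class> return_type,
//                               [[maybe_unused]] jlong value) {
//     static constexpr std::initializer_list<art::Primitive::Type> types{
//         art::Primitive::kPrimLong};
//     return std::find(types.begin(), types.end(), return_type->GetPrimitiveType()) !=
//            types.end();
//   }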

template <>
bool ValidReturnType<jobject>(art::Thread* self,
                              art::ObjPtr<art::mirror::Class> return_type,
                              jobject return_value) {
  if (return_type->IsPrimitive()) {
    return false;
  }
  if (return_value == nullptr) {
    // A null reference is assignable to any reference type.
    return true;
  }
  return return_type->IsAssignableFrom(self->DecodeJObject(return_value)->GetClass());
}

}  // namespace

jvmtiError StackUtil::PopFrame(jvmtiEnv* env, jthread thread) {
  art::Thread* self = art::Thread::Current();
  NonStandardExitFrames<NonStandardExitType::kPopFrame> frames(self, env, thread);
  if (frames.result_ != OK) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return frames.result_;
  }
  // Tell the final shadow-frame to pop immediately and skip its method-exit events, and have the
  // caller's frame retry the invoke instruction once the pop has happened.
  frames.penultimate_frame_->SetForceRetryInstruction(true);
  frames.final_frame_->SetForcePopFrame(true);
  frames.final_frame_->SetSkipMethodExitEvents(true);
  if (frames.created_final_frame_ || frames.created_penultimate_frame_) {
    art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
      DeoptManager::Get()->DeoptimizeThread(self);
    });
    frames.target_->RequestSynchronousCheckpoint(&fc);
  } else {
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
  }
  return OK;
}
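
// A minimal agent-side sketch of how this entry point is typically driven through the JVMTI
// interface (illustrative only; |jvmti| and |target| are assumed to be a valid jvmtiEnv* and
// jthread, and the can_pop_frame capability must already have been added):
//
//   jvmti->SuspendThread(target);              // the JVMTI spec requires the target be suspended
//   jvmtiError err = jvmti->PopFrame(target);  // routed to StackUtil::PopFrame above
//   jvmti->ResumeThread(target);               // the frame is actually popped once it resumes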

template <typename T>
jvmtiError
StackUtil::ForceEarlyReturn(jvmtiEnv* env, EventHandler* event_handler, jthread thread, T value) {
  art::Thread* self = art::Thread::Current();
  // We cannot use the usual null == current-thread convention here: for the events we use
  // internally to implement force-early-return, a null thread means all threads. Resolve a null
  // thread argument to an explicit jthread for the current thread instead.
  ScopedLocalRef<jthread> cur_thread(self->GetJniEnv(), nullptr);
  if (UNLIKELY(thread == nullptr)) {
    art::ScopedObjectAccess soa(self);
    cur_thread.reset(soa.AddLocalReference<jthread>(self->GetPeer()));
    thread = cur_thread.get();
  }
  // Set up the method-exit events used to implement early return before any locks are taken;
  // destructor ordering guarantees they are torn down again if anything below fails.
  SetupMethodExitEvents smee(self, event_handler, thread);
  NonStandardExitFrames<NonStandardExitType::kForceReturn> frames(self, env, thread);
  if (frames.result_ != OK) {
    smee.NotifyFailure();
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return frames.result_;
  } else if (!ValidReturnType<T>(
                 self, frames.final_frame_->GetMethod()->ResolveReturnType(), value)) {
    smee.NotifyFailure();
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return ERR(TYPE_MISMATCH);
  } else if (frames.final_frame_->GetForcePopFrame()) {
    // TODO: We should really support this.
    smee.NotifyFailure();
    std::string thread_name;
    frames.target_->GetThreadName(thread_name);
    JVMTI_LOG(WARNING, env) << "PopFrame or force-return already pending on thread " << thread_name;
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return ERR(OPAQUE_FRAME);
  }
  // Tell the final shadow-frame to return immediately; the delayed method-exit event queued below
  // supplies the forced return value.
  frames.final_frame_->SetForcePopFrame(true);
  AddDelayedMethodExitEvent<T>(event_handler, frames.final_frame_, value);
  if (frames.created_final_frame_ || frames.created_penultimate_frame_) {
    art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
      DeoptManager::Get()->DeoptimizeThread(self);
    });
    frames.target_->RequestSynchronousCheckpoint(&fc);
  } else {
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
  }
  return OK;
}
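
// Agent-side sketch of a JVMTI call that ends up in the template above (illustrative only; the
// can_force_early_return capability must be enabled and the target suspended or be the current
// thread, per the JVMTI spec):
//
//   jvmti->SuspendThread(target);
//   jvmtiError err = jvmti->ForceEarlyReturnInt(target, 42);  // instantiates T = jint
//   jvmti->ResumeThread(target);  // the pending early return takes effect when the thread runs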

// Instantiate the ForceEarlyReturn templates.
template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jint);
template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jlong);
template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jfloat);
template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jdouble);
template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jobject);
template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, std::nullptr_t);

}  // namespace openjdkjvmti