/* Copyright (C) 2016 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h.  The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "ti_stack.h"

#include <algorithm>
#include <list>
#include <unordered_map>
#include <vector>

#include "art_field-inl.h"
#include "art_method-inl.h"
#include "art_jvmti.h"
#include "base/bit_utils.h"
#include "base/enums.h"
#include "base/mutex.h"
#include "dex_file.h"
#include "dex_file_annotations.h"
#include "handle_scope-inl.h"
#include "jni_env_ext.h"
#include "jni_internal.h"
#include "mirror/class.h"
#include "mirror/dex_cache.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedLocalRef.h"
#include "stack.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "thread_pool.h"
#include "well_known_classes.h"

namespace openjdkjvmti {

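// Walks a thread's stack and records a jvmtiFrameInfo (method id and dex pc) for each Java
// frame, skipping runtime methods. 'start' frames are skipped before recording begins, and
// recording stops once 'stop' frames have been collected (a stop of 0 means no limit).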
struct GetStackTraceVisitor : public art::StackVisitor {
  GetStackTraceVisitor(art::Thread* thread_in,
                       size_t start_,
                       size_t stop_)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        start(start_),
        stop(stop_) {}

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }

    if (start == 0) {
      m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
      jmethodID id = art::jni::EncodeArtMethod(m);

      uint32_t dex_pc = GetDexPc(false);
      jlong dex_location = (dex_pc == art::DexFile::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);

      jvmtiFrameInfo info = { id, dex_location };
      frames.push_back(info);

      if (stop == 1) {
        return false;  // We're done.
      } else if (stop > 0) {
        stop--;
      }
    } else {
      start--;
    }

    return true;
  }

  std::vector<jvmtiFrameInfo> frames;
  size_t start;
  size_t stop;
};

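// Closure run on the target thread (via a synchronous checkpoint) that walks its stack with
// GetStackTraceVisitor and hands the collected frames, plus the remaining start/stop counters,
// back to the requesting thread.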
struct GetStackTraceClosure : public art::Closure {
 public:
  GetStackTraceClosure(size_t start, size_t stop)
      : start_input(start),
        stop_input(stop),
        start_result(0),
        stop_result(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetStackTraceVisitor visitor(self, start_input, stop_input);
    visitor.WalkStack(false);

    frames.swap(visitor.frames);
    start_result = visitor.start;
    stop_result = visitor.stop;
  }

  const size_t start_input;
  const size_t stop_input;

  std::vector<jvmtiFrameInfo> frames;
  size_t start_result;
  size_t stop_result;
};

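// Copies collected frames into the caller-provided frame_buffer, following the JVMTI
// GetStackTrace convention: a non-negative start_depth counts frames from the top of the stack
// (the frames were already windowed during collection), while a negative start_depth starts
// |start_depth| frames from the bottom of the stack.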
static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
                                       jint start_depth,
                                       size_t start_result,
                                       jint max_frame_count,
                                       jvmtiFrameInfo* frame_buffer,
                                       jint* count_ptr) {
  size_t collected_frames = frames.size();

  // Assume we're here having collected something.
  DCHECK_GT(max_frame_count, 0);

  // Frames from the top.
  if (start_depth >= 0) {
    if (start_result != 0) {
      // Not enough frames.
      return ERR(ILLEGAL_ARGUMENT);
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
    if (frames.size() > 0) {
      memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
    }
    *count_ptr = static_cast<jint>(frames.size());
    return ERR(NONE);
  }

  // Frames from the bottom.
  if (collected_frames < static_cast<size_t>(-start_depth)) {
    return ERR(ILLEGAL_ARGUMENT);
  }

  size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
  memcpy(frame_buffer,
         &frames.data()[collected_frames + start_depth],
         count * sizeof(jvmtiFrameInfo));
  *count_ptr = static_cast<jint>(count);
  return ERR(NONE);
}

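// Resolves a jthread argument to the corresponding art::Thread. A null jthread means the
// current thread; otherwise the peer must be a java.lang.Thread instance that is still alive.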
static jvmtiError GetThread(JNIEnv* env, jthread java_thread, art::Thread** thread) {
  if (java_thread == nullptr) {
    *thread = art::Thread::Current();
    if (*thread == nullptr) {
      // GetStackTrace can only be run during the live phase, so the current thread should be
      // attached and thus available. Getting a null for current means we're starting up or
      // dying.
      return ERR(WRONG_PHASE);
    }
  } else {
    if (!env->IsInstanceOf(java_thread, art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }

    // TODO: Need non-aborting call here, to return JVMTI_ERROR_INVALID_THREAD.
    art::ScopedObjectAccess soa(art::Thread::Current());
    art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
    *thread = art::Thread::FromManagedThread(soa, java_thread);
    if (*thread == nullptr) {
      return ERR(THREAD_NOT_ALIVE);
    }
  }
  return ERR(NONE);
}

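// Implements jvmtiEnv::GetStackTrace: validates the arguments, runs a GetStackTraceClosure
// checkpoint on the target thread, and translates the result into the caller's frame_buffer.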
jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint start_depth,
                                    jint max_frame_count,
                                    jvmtiFrameInfo* frame_buffer,
                                    jint* count_ptr) {
  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(), java_thread, &thread);
  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting ||
      state == art::ThreadState::kTerminated ||
      thread->IsStillStarting()) {
    return ERR(THREAD_NOT_ALIVE);
  }

  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (frame_buffer == nullptr || count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  if (max_frame_count == 0) {
    *count_ptr = 0;
    return ERR(NONE);
  }

  GetStackTraceClosure closure(start_depth >= 0 ? static_cast<size_t>(start_depth) : 0,
                               start_depth >= 0 ? static_cast<size_t>(max_frame_count) : 0);
  thread->RequestSynchronousCheckpoint(&closure);

  return TranslateFrameVector(closure.frames,
                              start_depth,
                              closure.start_result,
                              max_frame_count,
                              frame_buffer,
                              count_ptr);
}

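// Closure that collects a stack trace for the checkpointed thread and stores it in a shared
// map keyed by that thread, so traces from multiple threads can be gathered concurrently.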
struct GetAllStackTraceClosure : public art::Closure {
 public:
  explicit GetAllStackTraceClosure(size_t stop)
      : start_input(0),
        stop_input(stop),
        frames_lock("GetAllStackTraceGuard", art::LockLevel::kAbortLock),
        start_result(0),
        stop_result(0) {}

  void Run(art::Thread* self)
      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) REQUIRES(!frames_lock) {
    // self should be live here (so it could be suspended). No need to filter.

    art::Thread* current = art::Thread::Current();
    std::vector<jvmtiFrameInfo> self_frames;

    GetStackTraceVisitor visitor(self, start_input, stop_input);
    visitor.WalkStack(false);

    self_frames.swap(visitor.frames);

    art::MutexLock mu(current, frames_lock);
    frames.emplace(self, self_frames);
  }

  const size_t start_input;
  const size_t stop_input;

  art::Mutex frames_lock;
  std::unordered_map<art::Thread*, std::vector<jvmtiFrameInfo>> frames GUARDED_BY(frames_lock);
  size_t start_result;
  size_t stop_result;
};

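// Implements jvmtiEnv::GetAllStackTraces: suspends all threads, collects up to max_frame_count
// frames per live thread, and packs the results (jvmtiStackInfo headers followed by the frame
// data) into a single allocation as required by the spec.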
jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
                                        jint max_frame_count,
                                        jvmtiStackInfo** stack_info_ptr,
                                        jint* thread_count_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  art::Thread* current = art::Thread::Current();
  art::ScopedObjectAccess soa(current);      // Now we know we have the shared lock.
  art::ScopedThreadSuspension sts(current, art::kWaitingForDebuggerSuspension);
  art::ScopedSuspendAll ssa("GetAllStackTraces");

  std::vector<art::Thread*> threads;
  std::vector<std::vector<jvmtiFrameInfo>> frames;
  {
    std::list<art::Thread*> thread_list;
    {
      art::MutexLock mu(current, *art::Locks::thread_list_lock_);
      thread_list = art::Runtime::Current()->GetThreadList()->GetList();
    }

    for (art::Thread* thread : thread_list) {
      // Skip threads that are still starting.
      if (thread->IsStillStarting()) {
        continue;
      }

      GetStackTraceClosure closure(0u, static_cast<size_t>(max_frame_count));
      thread->RequestSynchronousCheckpoint(&closure);

      threads.push_back(thread);
      frames.emplace_back();
      frames.back().swap(closure.frames);
    }
  }

  // Convert the data into our output format. Note: we need to keep the threads suspended,
  // as we need to access them for their peers.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  //       allocate one big chunk for this and the actual frames, which means we need
  //       to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    art::Thread* self = threads[index];
    const std::vector<jvmtiFrameInfo>& thread_frames = frames[index];

    // For the time being, set the thread to null. We don't have good ScopedLocalRef
    // infrastructure.
    DCHECK(self->GetPeerFromOtherThread() != nullptr);
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * frames.size(),
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  // First copy in all the basic data.
  memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * frames.size());

  // Now copy the frames and fix up the pointers.
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);
  for (size_t i = 0; i < frames.size(); ++i) {
    jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
    jvmtiStackInfo& new_stack_info = stack_info[i];

    jthread thread_peer = current->GetJniEnv()->AddLocalReference<jthread>(
        threads[i]->GetPeerFromOtherThread());
    new_stack_info.thread = thread_peer;

    if (old_stack_info.frame_count > 0) {
      // Only copy when there's data - leave the nullptr alone.
      size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
      memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
      new_stack_info.frame_buffer = frame_info;
      frame_info += old_stack_info.frame_count;
    }
  }

  *stack_info_ptr = stack_info;
  *thread_count_ptr = static_cast<jint>(frames.size());

  return ERR(NONE);
}

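// Implements jvmtiEnv::GetThreadListStackTraces: like GetAllStackTraces, but only for the
// threads given in thread_list. Threads with no live native counterpart (not yet started or
// already terminated) get an entry with a state but no frames.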
jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
                                               jint thread_count,
                                               const jthread* thread_list,
                                               jint max_frame_count,
                                               jvmtiStackInfo** stack_info_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_list == nullptr || stack_info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  if (thread_count == 0) {
    *stack_info_ptr = nullptr;
    return ERR(NONE);
  }

  art::Thread* current = art::Thread::Current();
  art::ScopedObjectAccess soa(current);      // Now we know we have the shared lock.

  // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
  art::VariableSizedHandleScope hs(current);
  std::vector<art::Handle<art::mirror::Object>> handles;
  for (jint i = 0; i != thread_count; ++i) {
    if (thread_list[i] == nullptr) {
      return ERR(INVALID_THREAD);
    }
    if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }
    handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
  }

  std::vector<art::Thread*> threads;
  std::vector<size_t> thread_list_indices;
  std::vector<std::vector<jvmtiFrameInfo>> frames;

  {
    art::ScopedThreadSuspension sts(current, art::kWaitingForDebuggerSuspension);
    art::ScopedSuspendAll ssa("GetThreadListStackTraces");

    {
      std::list<art::Thread*> art_thread_list;
      {
        art::MutexLock mu(current, *art::Locks::thread_list_lock_);
        art_thread_list = art::Runtime::Current()->GetThreadList()->GetList();
      }

      for (art::Thread* thread : art_thread_list) {
        if (thread->IsStillStarting()) {
          // Skip this. We can't get the jpeer, and if it is for a thread in the thread_list,
          // we'll just report STARTING.
          continue;
        }

        // Get the peer, and check whether we know it.
        art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
        for (size_t index = 0; index != handles.size(); ++index) {
          if (peer == handles[index].Get()) {
            // Found the thread.
            GetStackTraceClosure closure(0u, static_cast<size_t>(max_frame_count));
            thread->RequestSynchronousCheckpoint(&closure);

            threads.push_back(thread);
            thread_list_indices.push_back(index);
            frames.emplace_back();
            frames.back().swap(closure.frames);

            continue;
          }
        }

        // Must be not started, or dead. We'll deal with it at the end.
      }
    }
  }

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  //       allocate one big chunk for this and the actual frames, which means we need
  //       to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    art::Thread* self = threads[index];
    const std::vector<jvmtiFrameInfo>& thread_frames = frames[index];

    // For the time being, set the thread to null. We don't have good ScopedLocalRef
    // infrastructure.
    DCHECK(self->GetPeerFromOtherThread() != nullptr);
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer. Note that this is not frames.size(),
  // potentially.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);

  for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
    // Check whether we found a running thread for this.
    // Note: For simplicity, and with the expectation that the list is usually small, use a simple
    //       search. (The list is *not* sorted!)
    auto it = std::find(thread_list_indices.begin(), thread_list_indices.end(), i);
    if (it == thread_list_indices.end()) {
      // No native thread. Must be new or dead. We need to fill out the stack info now.
      // (Need to read the Java "started" field to know whether this is starting or terminated.)
      art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
      art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
      art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
      CHECK(started_field != nullptr);
      bool started = started_field->GetBoolean(peer) != 0;
      constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
      constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
          JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
      stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      stack_info[i].state = started ? kTerminatedState : kStartedState;
      stack_info[i].frame_count = 0;
      stack_info[i].frame_buffer = nullptr;
    } else {
      // Had a native thread and frames.
      size_t f_index = it - thread_list_indices.begin();

      jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
      jvmtiStackInfo& new_stack_info = stack_info[i];

      memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
      new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      if (old_stack_info.frame_count > 0) {
        // Only copy when there's data - leave the nullptr alone.
        size_t frames_size =
            static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
        memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
        new_stack_info.frame_buffer = frame_info;
        frame_info += old_stack_info.frame_count;
      }
    }
  }

  *stack_info_ptr = stack_info;

  return ERR(NONE);
}

// Walks up the stack counting Java frames. This is not StackVisitor::ComputeNumFrames, as
// runtime methods and transitions must not be counted.
struct GetFrameCountVisitor : public art::StackVisitor {
  explicit GetFrameCountVisitor(art::Thread* thread)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        count(0) {}

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      count++;
    }
    return true;
  }

  size_t count;
};

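// Closure run on the target thread that counts its Java frames with GetFrameCountVisitor.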
struct GetFrameCountClosure : public art::Closure {
 public:
  GetFrameCountClosure() : count(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetFrameCountVisitor visitor(self);
    visitor.WalkStack(false);

    count = visitor.count;
  }

  size_t count;
};

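// Implements jvmtiEnv::GetFrameCount by checkpointing the target thread and counting its
// Java frames.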
jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint* count_ptr) {
  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(), java_thread, &thread);
  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  if (count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  GetFrameCountClosure closure;
  thread->RequestSynchronousCheckpoint(&closure);

  *count_ptr = closure.count;
  return ERR(NONE);
}

// Walks up the stack 'n' callers, when used with Thread::WalkStack.
struct GetLocationVisitor : public art::StackVisitor {
  GetLocationVisitor(art::Thread* thread, size_t n_in)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        n(n_in),
        count(0),
        caller(nullptr),
        caller_dex_pc(0) {}

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      DCHECK(caller == nullptr);
      if (count == n) {
        caller = m;
        caller_dex_pc = GetDexPc(false);
        return false;
      }
      count++;
    }
    return true;
  }

  const size_t n;
  size_t count;
  art::ArtMethod* caller;
  uint32_t caller_dex_pc;
};

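// Closure run on the target thread that finds the method and dex pc at stack depth 'n'.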
struct GetLocationClosure : public art::Closure {
 public:
  explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetLocationVisitor visitor(self, n);
    visitor.WalkStack(false);

    method = visitor.caller;
    dex_pc = visitor.caller_dex_pc;
  }

  const size_t n;
  art::ArtMethod* method;
  uint32_t dex_pc;
};

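// Implements jvmtiEnv::GetFrameLocation: reports the method and location (dex pc, or -1 for
// native methods) executing in the frame at the given depth of the target thread's stack.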
jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                       jthread java_thread,
                                       jint depth,
                                       jmethodID* method_ptr,
                                       jlocation* location_ptr) {
  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(), java_thread, &thread);
  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  if (depth < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (method_ptr == nullptr || location_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  GetLocationClosure closure(static_cast<size_t>(depth));
  thread->RequestSynchronousCheckpoint(&closure);

  if (closure.method == nullptr) {
    return ERR(NO_MORE_FRAMES);
  }

  *method_ptr = art::jni::EncodeArtMethod(closure.method);
  if (closure.method->IsNative()) {
    *location_ptr = -1;
  } else {
    if (closure.dex_pc == art::DexFile::kDexNoIndex) {
      return ERR(INTERNAL);
    }
    *location_ptr = static_cast<jlocation>(closure.dex_pc);
  }

  return ERR(NONE);
}

}  // namespace openjdkjvmti