/* Copyright (C) 2017 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h. The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include <functional>
#include <iosfwd>
#include <mutex>

#include "deopt_manager.h"

#include "art_jvmti.h"
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/mutex-inl.h"
#include "dex/dex_file_annotations.h"
#include "dex/modifiers.h"
#include "events-inl.h"
#include "gc/collector_type.h"
#include "gc/heap.h"
#include "gc/scoped_gc_critical_section.h"
#include "instrumentation.h"
#include "jit/jit.h"
#include "jni/jni_internal.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
#include "nativehelper/scoped_local_ref.h"
#include "read_barrier_config.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-current-inl.h"
#include "thread_list.h"
#include "ti_phase.h"

namespace openjdkjvmti {

// TODO We should make this much more selective in the future so we only return true when we
// actually care about the method at this time (i.e. active frames had locals changed). For now we
// just assume that if anything has changed any frame's locals we care about all methods. If
// nothing has, we only care about methods with active breakpoints on them. In the future we should
// probably rewrite all of this to instead do this at the ShadowFrame or thread granularity.
bool JvmtiMethodInspectionCallback::IsMethodBeingInspected(art::ArtMethod* method) {
  // On non-java-debuggable runtimes we need to assume that any method might not be debuggable and
  // therefore potentially being inspected (due to inlining). If we are debuggable we rely heavily
  // on inlining not being done, since we don't keep track of which methods get inlined where, and
  // simply check whether the method has a breakpoint.
  return !art::Runtime::Current()->IsJavaDebuggable() ||
         manager_->HaveLocalsChanged() ||
         manager_->MethodHasBreakpoints(method);
}

bool JvmtiMethodInspectionCallback::IsMethodSafeToJit(art::ArtMethod* method) {
  return !manager_->MethodHasBreakpoints(method);
}

bool JvmtiMethodInspectionCallback::MethodNeedsDebugVersion(
    art::ArtMethod* method ATTRIBUTE_UNUSED) {
  return true;
}

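// Lock-level note (derived from the initializers below): deoptimization_status_lock_ is created
// one level above kClassLinkerClassesLock and breakpoint_status_lock_ one level above kAbortLock.
// Within this file deoptimization_status_lock_ is always acquired before breakpoint_status_lock_.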
DeoptManager::DeoptManager()
  : deoptimization_status_lock_("JVMTI_DeoptimizationStatusLock",
                                static_cast<art::LockLevel>(
                                    art::LockLevel::kClassLinkerClassesLock + 1)),
    deoptimization_condition_("JVMTI_DeoptimizationCondition", deoptimization_status_lock_),
    performing_deoptimization_(false),
    global_deopt_count_(0),
    deopter_count_(0),
    breakpoint_status_lock_("JVMTI_BreakpointStatusLock",
                            static_cast<art::LockLevel>(art::LockLevel::kAbortLock + 1)),
    inspection_callback_(this),
    set_local_variable_called_(false) { }

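// Setup and Shutdown register and unregister the method-inspection callback with the runtime's
// RuntimeCallbacks. Both suspend all threads while the callback list is modified, reporting this
// thread as waiting for a debugger to attach.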
void DeoptManager::Setup() {
  art::ScopedThreadStateChange stsc(art::Thread::Current(),
                                    art::ThreadState::kWaitingForDebuggerToAttach);
  art::ScopedSuspendAll ssa("Add method Inspection Callback");
  art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks();
  callbacks->AddMethodInspectionCallback(&inspection_callback_);
}

void DeoptManager::Shutdown() {
  art::ScopedThreadStateChange stsc(art::Thread::Current(),
                                    art::ThreadState::kWaitingForDebuggerToAttach);
  art::ScopedSuspendAll ssa("remove method Inspection Callback");
  art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks();
  callbacks->RemoveMethodInspectionCallback(&inspection_callback_);
}

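// Dumps the current deoptimization state: the requester and global-deopt counts, whether OSR is
// still allowed, every method with an active breakpoint, and each thread's force-interpreter
// count. Illustrative output shape (values here are hypothetical):
//   Deoptimizer count: 1
//   Global deopt count: 0
//   Can perform OSR: 1
//   Breakpoint: void Foo.bar() @ 0xa
//   Thread main (id: 1) force interpreter count 0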
void DeoptManager::DumpDeoptInfo(art::Thread* self, std::ostream& stream) {
  art::ScopedObjectAccess soa(self);
  art::MutexLock mutll(self, *art::Locks::thread_list_lock_);
  art::MutexLock mudsl(self, deoptimization_status_lock_);
  art::MutexLock mubsl(self, breakpoint_status_lock_);
  stream << "Deoptimizer count: " << deopter_count_ << "\n";
  stream << "Global deopt count: " << global_deopt_count_ << "\n";
  stream << "Can perform OSR: " << !set_local_variable_called_.load() << "\n";
  for (const auto& [bp, loc] : this->breakpoint_status_) {
    stream << "Breakpoint: " << bp->PrettyMethod() << " @ 0x" << std::hex << loc << "\n";
  }
  struct DumpThreadDeoptCount : public art::Closure {
   public:
    DumpThreadDeoptCount(std::ostream& stream, std::mutex& mu)
        : cnt_(0), stream_(stream), mu_(mu) {}
    void Run(art::Thread* self) override {
      {
        std::lock_guard<std::mutex> lg(mu_);
        std::string name;
        self->GetThreadName(name);
        stream_ << "Thread " << name << " (id: " << std::dec << self->GetThreadId()
                << ") force interpreter count " << self->ForceInterpreterCount() << "\n";
      }
      // Increment this after unlocking the mutex so we won't race its destructor.
      cnt_++;
    }

    void WaitForCount(size_t threads) {
      while (cnt_.load() != threads) {
        sched_yield();
      }
    }

   private:
    std::atomic<size_t> cnt_;
    std::ostream& stream_;
    std::mutex& mu_;
  };

  std::mutex mu;
  DumpThreadDeoptCount dtdc(stream, mu);
  auto func = [](art::Thread* thread, void* ctx) {
    reinterpret_cast<DumpThreadDeoptCount*>(ctx)->Run(thread);
  };
  art::Runtime::Current()->GetThreadList()->ForEach(func, &dtdc);
}

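// One-time follow-up to Setup once the agent phase is known. If the runtime is not
// java-debuggable, this either switches it to debuggable (only possible during the ON_LOAD phase,
// before the JIT exists) or, for late attach, falls back to best-effort kArtTiVersion support and
// tries to start the JIT so the process does not run entirely in the interpreter. In both
// non-debuggable cases the boot image is deoptimized.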
void DeoptManager::FinishSetup() {
  art::Thread* self = art::Thread::Current();
  art::MutexLock mu(self, deoptimization_status_lock_);

  art::Runtime* runtime = art::Runtime::Current();
  // See if we need to do anything.
  if (!runtime->IsJavaDebuggable()) {
    // See if we can enable all JVMTI functions. If this is false, only kArtTiVersion agents can be
    // retrieved and they will all be best-effort.
    if (PhaseUtil::GetPhaseUnchecked() == JVMTI_PHASE_ONLOAD) {
      // We are still early enough to change the compiler options and get full JVMTI support.
      LOG(INFO) << "Openjdkjvmti plugin loaded on a non-debuggable runtime. Changing runtime to "
                << "debuggable state. Please pass '--debuggable' to dex2oat and "
                << "'-Xcompiler-option --debuggable' to dalvikvm in the future.";
      DCHECK(runtime->GetJit() == nullptr) << "Jit should not be running yet!";
      runtime->AddCompilerOption("--debuggable");
      runtime->SetJavaDebuggable(true);
    } else {
      LOG(WARNING) << "Openjdkjvmti plugin was loaded on a non-debuggable Runtime. Plugin was "
                   << "loaded too late to change runtime state to DEBUGGABLE. Only kArtTiVersion "
                   << "(0x" << std::hex << kArtTiVersion << ") environments are available. Some "
                   << "functionality might not work properly.";
      if (runtime->GetJit() == nullptr &&
          runtime->GetJITOptions()->UseJitCompilation() &&
          !runtime->GetInstrumentation()->IsForcedInterpretOnly()) {
        // If we don't have a jit we should try to start the jit for performance reasons. We only
        // need to do this for late attach on non-debuggable processes because for debuggable
        // processes we already rely on jit and we cannot force this jit to start if we are still
        // in OnLoad since the runtime hasn't started up sufficiently. This is only expected to
        // happen on userdebug/eng builds.
        LOG(INFO) << "Attempting to start jit for openjdkjvmti plugin.";
        // Note: use rwx allowed = true, because if this is the system server, we will not be
        // allowed to allocate any JIT code cache anyway.
        runtime->CreateJitCodeCache(/*rwx_memory_allowed=*/true);
        runtime->CreateJit();
        if (runtime->GetJit() == nullptr) {
          LOG(WARNING) << "Could not start jit for openjdkjvmti plugin. This process might be "
                       << "quite slow as it is running entirely in the interpreter. Try running "
                       << "'setenforce 0' and restarting this process.";
        }
      }
    }
    runtime->DeoptimizeBootImage();
  }
}

bool DeoptManager::MethodHasBreakpoints(art::ArtMethod* method) {
  art::MutexLock lk(art::Thread::Current(), breakpoint_status_lock_);
  return MethodHasBreakpointsLocked(method);
}

bool DeoptManager::MethodHasBreakpointsLocked(art::ArtMethod* method) {
  auto elem = breakpoint_status_.find(method);
  return elem != breakpoint_status_.end() && elem->second != 0;
}

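// Note: the unlocked Add/RemoveDeoptimizeAllMethods entry points below take
// deoptimization_status_lock_ and never unlock it themselves; the *Locked helpers release it,
// either via ScopedDeoptimizationContext or via WaitForDeoptimizationToFinish.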
void DeoptManager::RemoveDeoptimizeAllMethods() {
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadSuspension sts(self, art::kSuspended);
  deoptimization_status_lock_.ExclusiveLock(self);
  RemoveDeoptimizeAllMethodsLocked(self);
}

void DeoptManager::AddDeoptimizeAllMethods() {
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadSuspension sts(self, art::kSuspended);
  deoptimization_status_lock_.ExclusiveLock(self);
  AddDeoptimizeAllMethodsLocked(self);
}

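// Note on the is_default special case below: default interface methods are handled with a full
// global (un)deoptimization rather than a limited one, presumably because default methods can be
// copied into implementing classes, so deoptimizing only the single canonical ArtMethod would not
// cover the copies.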
void DeoptManager::AddMethodBreakpoint(art::ArtMethod* method) {
  DCHECK(method->IsInvokable());
  DCHECK(!method->IsProxyMethod()) << method->PrettyMethod();
  DCHECK(!method->IsNative()) << method->PrettyMethod();

  art::Thread* self = art::Thread::Current();
  method = method->GetCanonicalMethod();
  bool is_default = method->IsDefault();

  art::ScopedThreadSuspension sts(self, art::kSuspended);
  deoptimization_status_lock_.ExclusiveLock(self);
  {
    breakpoint_status_lock_.ExclusiveLock(self);

    DCHECK_GT(deopter_count_, 0u) << "unexpected deoptimization request";

    if (MethodHasBreakpointsLocked(method)) {
      // Don't need to do anything extra.
      breakpoint_status_[method]++;
      // Another thread might be deoptimizing the very method we just added new breakpoints for.
      // Wait for any deopts to finish before moving on.
      breakpoint_status_lock_.ExclusiveUnlock(self);
      WaitForDeoptimizationToFinish(self);
      return;
    }
    breakpoint_status_[method] = 1;
    breakpoint_status_lock_.ExclusiveUnlock(self);
  }
  auto instrumentation = art::Runtime::Current()->GetInstrumentation();
  if (instrumentation->IsForcedInterpretOnly()) {
    // We are already interpreting everything so no need to do anything.
    deoptimization_status_lock_.ExclusiveUnlock(self);
    return;
  } else if (is_default) {
    AddDeoptimizeAllMethodsLocked(self);
  } else {
    PerformLimitedDeoptimization(self, method);
  }
}

void DeoptManager::RemoveMethodBreakpoint(art::ArtMethod* method) {
  DCHECK(method->IsInvokable()) << method->PrettyMethod();
  DCHECK(!method->IsProxyMethod()) << method->PrettyMethod();
  DCHECK(!method->IsNative()) << method->PrettyMethod();

  art::Thread* self = art::Thread::Current();
  method = method->GetCanonicalMethod();
  bool is_default = method->IsDefault();

  art::ScopedThreadSuspension sts(self, art::kSuspended);
  // Ideally we should do a ScopedSuspendAll right here to get the full mutator_lock_ that we might
  // need, but since that is very heavy we will instead just use a condition variable to make sure
  // we don't race with ourselves.
  deoptimization_status_lock_.ExclusiveLock(self);
  bool is_last_breakpoint;
  {
    art::MutexLock mu(self, breakpoint_status_lock_);

    DCHECK_GT(deopter_count_, 0u) << "unexpected deoptimization request";
    DCHECK(MethodHasBreakpointsLocked(method)) << "Breakpoint on a method was removed without "
                                               << "breakpoints present!";
    breakpoint_status_[method] -= 1;
    is_last_breakpoint = (breakpoint_status_[method] == 0);
  }
  auto instrumentation = art::Runtime::Current()->GetInstrumentation();
  if (UNLIKELY(instrumentation->IsForcedInterpretOnly())) {
    // We don't need to do anything since we are interpreting everything anyway.
    deoptimization_status_lock_.ExclusiveUnlock(self);
    return;
  } else if (is_last_breakpoint) {
    if (UNLIKELY(is_default)) {
      RemoveDeoptimizeAllMethodsLocked(self);
    } else {
      PerformLimitedUndeoptimization(self, method);
    }
  } else {
    // Another thread might be deoptimizing the very methods we just removed breakpoints from.
    // Wait for any deopts to finish before moving on.
    WaitForDeoptimizationToFinish(self);
  }
}

void DeoptManager::WaitForDeoptimizationToFinishLocked(art::Thread* self) {
  while (performing_deoptimization_) {
    deoptimization_condition_.Wait(self);
  }
}

void DeoptManager::WaitForDeoptimizationToFinish(art::Thread* self) {
  WaitForDeoptimizationToFinishLocked(self);
  deoptimization_status_lock_.ExclusiveUnlock(self);
}

// Users should make sure that only gc-critical-section safe code is used while a
// ScopedDeoptimizationContext exists.
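// Roughly: the constructor waits for any in-flight deoptimization to finish, marks this thread as
// the one performing deoptimization, drops deoptimization_status_lock_, enters a GC critical
// section, and suspends all other threads. The destructor exits the critical section, resumes all
// threads, clears the flag, and broadcasts on deoptimization_condition_ so waiting threads can
// proceed. Typical use (mirroring PerformLimitedDeoptimization below):
//   deoptimization_status_lock_.ExclusiveLock(self);
//   { ScopedDeoptimizationContext sdc(self, this); /* mutate instrumentation state */ }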
class ScopedDeoptimizationContext : public art::ValueObject {
 public:
  ScopedDeoptimizationContext(art::Thread* self, DeoptManager* deopt)
      RELEASE(deopt->deoptimization_status_lock_)
      ACQUIRE(art::Locks::mutator_lock_)
      ACQUIRE(art::Roles::uninterruptible_)
      : self_(self),
        deopt_(deopt),
        critical_section_(self_, "JVMTI Deoptimizing methods"),
        uninterruptible_cause_(nullptr) {
    deopt_->WaitForDeoptimizationToFinishLocked(self_);
    DCHECK(!deopt->performing_deoptimization_)
        << "Already performing deoptimization on another thread!";
    // Use performing_deoptimization_ to keep track of the lock.
    deopt_->performing_deoptimization_ = true;
    deopt_->deoptimization_status_lock_.Unlock(self_);
    uninterruptible_cause_ = critical_section_.Enter(art::gc::kGcCauseInstrumentation,
                                                     art::gc::kCollectorTypeCriticalSection);
    art::Runtime::Current()->GetThreadList()->SuspendAll("JVMTI Deoptimizing methods",
                                                         /*long_suspend=*/ false);
  }

  ~ScopedDeoptimizationContext()
      RELEASE(art::Locks::mutator_lock_)
      RELEASE(art::Roles::uninterruptible_) {
    // Can be suspended again.
    critical_section_.Exit(uninterruptible_cause_);
    // Release the mutator lock.
    art::Runtime::Current()->GetThreadList()->ResumeAll();
    // Let other threads know it's fine to proceed.
    art::MutexLock lk(self_, deopt_->deoptimization_status_lock_);
    deopt_->performing_deoptimization_ = false;
    deopt_->deoptimization_condition_.Broadcast(self_);
  }

 private:
  art::Thread* self_;
  DeoptManager* deopt_;
  art::gc::GCCriticalSection critical_section_;
  const char* uninterruptible_cause_;
};


void DeoptManager::AddDeoptimizeAllMethodsLocked(art::Thread* self) {
  global_deopt_count_++;
  if (global_deopt_count_ == 1) {
    PerformGlobalDeoptimization(self);
  } else {
    WaitForDeoptimizationToFinish(self);
  }
}

void DeoptManager::RemoveDeoptimizeAllMethodsLocked(art::Thread* self) {
  DCHECK_GT(global_deopt_count_, 0u) << "Request to remove non-existent global deoptimization!";
  global_deopt_count_--;
  if (global_deopt_count_ == 0) {
    PerformGlobalUndeoptimization(self);
  } else {
    WaitForDeoptimizationToFinish(self);
  }
}

void DeoptManager::PerformLimitedDeoptimization(art::Thread* self, art::ArtMethod* method) {
  ScopedDeoptimizationContext sdc(self, this);
  art::Runtime::Current()->GetInstrumentation()->Deoptimize(method);
}

void DeoptManager::PerformLimitedUndeoptimization(art::Thread* self, art::ArtMethod* method) {
  ScopedDeoptimizationContext sdc(self, this);
  art::Runtime::Current()->GetInstrumentation()->Undeoptimize(method);
}

void DeoptManager::PerformGlobalDeoptimization(art::Thread* self) {
  ScopedDeoptimizationContext sdc(self, this);
  art::Runtime::Current()->GetInstrumentation()->DeoptimizeEverything(
      kDeoptManagerInstrumentationKey);
}

void DeoptManager::PerformGlobalUndeoptimization(art::Thread* self) {
  ScopedDeoptimizationContext sdc(self, this);
  art::Runtime::Current()->GetInstrumentation()->UndeoptimizeEverything(
      kDeoptManagerInstrumentationKey);
}

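// Forces a single thread into the interpreter. The first request for a thread increments its
// force-interpreter count and runs a synchronous checkpoint on it to instrument its stack; later
// requests only bump the count. RequestSynchronousCheckpoint is expected to release
// thread_list_lock_, which is why the lock is only unlocked explicitly on the other paths.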
jvmtiError DeoptManager::AddDeoptimizeThreadMethods(art::ScopedObjectAccessUnchecked& soa,
                                                    jthread jtarget) {
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
  art::Thread* target = nullptr;
  jvmtiError err = OK;
  if (!ThreadUtil::GetNativeThread(jtarget, soa, &target, &err)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return err;
  }
  // We don't need additional locking here because we hold the thread_list_lock_.
  if (target->IncrementForceInterpreterCount() == 1) {
    struct DeoptClosure : public art::Closure {
     public:
      explicit DeoptClosure(DeoptManager* manager) : manager_(manager) {}
      void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
        manager_->DeoptimizeThread(self);
      }

     private:
      DeoptManager* manager_;
    };
    DeoptClosure c(this);
    target->RequestSynchronousCheckpoint(&c);
  } else {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
  }
  return OK;
}

jvmtiError DeoptManager::RemoveDeoptimizeThreadMethods(art::ScopedObjectAccessUnchecked& soa,
                                                       jthread jtarget) {
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
  art::Thread* target = nullptr;
  jvmtiError err = OK;
  if (!ThreadUtil::GetNativeThread(jtarget, soa, &target, &err)) {
    return err;
  }
  // We don't need additional locking here because we hold the thread_list_lock_.
  DCHECK_GT(target->ForceInterpreterCount(), 0u);
  target->DecrementForceInterpreterCount();
  return OK;
}

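// Requester bookkeeping: the first AddDeoptimizationRequester enables the instrumentation's
// deoptimization support (including single-thread deopt) and the last
// RemoveDeoptimizationRequester disables it again. On those transitions
// deoptimization_status_lock_ is handed off to a ScopedDeoptimizationContext; otherwise it is
// unlocked directly.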
void DeoptManager::RemoveDeoptimizationRequester() {
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadStateChange sts(self, art::kSuspended);
  deoptimization_status_lock_.ExclusiveLock(self);
  DCHECK_GT(deopter_count_, 0u) << "Removing deoptimization requester without any being present";
  deopter_count_--;
  if (deopter_count_ == 0) {
    ScopedDeoptimizationContext sdc(self, this);
    // TODO Give this a real key.
    art::Runtime::Current()->GetInstrumentation()->DisableDeoptimization("");
    return;
  } else {
    deoptimization_status_lock_.ExclusiveUnlock(self);
  }
}

void DeoptManager::AddDeoptimizationRequester() {
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadStateChange stsc(self, art::kSuspended);
  deoptimization_status_lock_.ExclusiveLock(self);
  deopter_count_++;
  if (deopter_count_ == 1) {
    ScopedDeoptimizationContext sdc(self, this);
    art::instrumentation::Instrumentation* instrumentation =
        art::Runtime::Current()->GetInstrumentation();
    // Enable deoptimization.
    instrumentation->EnableDeoptimization();
    // Tell instrumentation we will be deopting single threads.
    instrumentation->EnableSingleThreadDeopt();
  } else {
    deoptimization_status_lock_.ExclusiveUnlock(self);
  }
}

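// A rough sketch of the intent here: with all threads suspended and GC blocked, the target
// thread's stack is instrumented so that its existing frames return through the instrumentation,
// allowing them to be deoptimized once the thread's force-interpreter count has been raised.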
void DeoptManager::DeoptimizeThread(art::Thread* target) {
  // We might or might not be running on the target thread (self) so get Thread::Current
  // directly.
  art::ScopedThreadSuspension sts(art::Thread::Current(), art::kSuspended);
  art::gc::ScopedGCCriticalSection sgccs(art::Thread::Current(),
                                         art::gc::GcCause::kGcCauseDebugger,
                                         art::gc::CollectorType::kCollectorTypeDebugger);
  art::ScopedSuspendAll ssa("Instrument thread stack");
  art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(target);
}

extern DeoptManager* gDeoptManager;
DeoptManager* DeoptManager::Get() {
  return gDeoptManager;
}

}  // namespace openjdkjvmti