1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_RUNTIME_THREAD_H_
18 #define ART_RUNTIME_THREAD_H_
19 
20 #include <atomic>
21 #include <bitset>
22 #include <deque>
23 #include <iosfwd>
24 #include <list>
25 #include <memory>
26 #include <string>
27 
28 #include "base/atomic.h"
29 #include "base/enums.h"
30 #include "base/locks.h"
31 #include "base/macros.h"
32 #include "base/safe_map.h"
33 #include "base/value_object.h"
34 #include "entrypoints/jni/jni_entrypoints.h"
35 #include "entrypoints/quick/quick_entrypoints.h"
36 #include "handle.h"
37 #include "handle_scope.h"
38 #include "interpreter/interpreter_cache.h"
39 #include "javaheapprof/javaheapsampler.h"
40 #include "jvalue.h"
41 #include "managed_stack.h"
42 #include "offsets.h"
43 #include "read_barrier_config.h"
44 #include "reflective_handle_scope.h"
45 #include "runtime_globals.h"
46 #include "runtime_stats.h"
47 #include "thread_state.h"
48 
49 class BacktraceMap;
50 
51 namespace art {
52 
53 namespace gc {
54 namespace accounting {
55 template<class T> class AtomicStack;
56 }  // namespace accounting
57 namespace collector {
58 class SemiSpace;
59 }  // namespace collector
60 }  // namespace gc
61 
62 namespace instrumentation {
63 struct InstrumentationStackFrame;
64 }  // namespace instrumentation
65 
66 namespace mirror {
67 class Array;
68 class Class;
69 class ClassLoader;
70 class Object;
71 template<class T> class ObjectArray;
72 template<class T> class PrimitiveArray;
73 typedef PrimitiveArray<int32_t> IntArray;
74 class StackTraceElement;
75 class String;
76 class Throwable;
77 }  // namespace mirror
78 
79 namespace verifier {
80 class MethodVerifier;
81 class VerifierDeps;
82 }  // namespace verifier
83 
84 class ArtMethod;
85 class BaseMutex;
86 class ClassLinker;
87 class Closure;
88 class Context;
89 class DeoptimizationContextRecord;
90 class DexFile;
91 class FrameIdToShadowFrame;
92 class IsMarkedVisitor;
93 class JavaVMExt;
94 class JNIEnvExt;
95 class Monitor;
96 class RootVisitor;
97 class ScopedObjectAccessAlreadyRunnable;
98 class ShadowFrame;
99 class StackedShadowFrameRecord;
100 enum class SuspendReason : char;
101 class Thread;
102 class ThreadList;
103 enum VisitRootFlags : uint8_t;
104 
105 // A piece of data that can be held in the CustomTls. The destructor will be called during thread
106 // shutdown. The thread the destructor is called on is not necessarily the same thread it was stored
107 // on.
108 class TLSData {
109  public:
110   virtual ~TLSData() {}
111 };
112 
113 // Thread priorities. These must match the Thread.MIN_PRIORITY,
114 // Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
115 enum ThreadPriority {
116   kMinThreadPriority = 1,
117   kNormThreadPriority = 5,
118   kMaxThreadPriority = 10,
119 };
120 
121 enum ThreadFlag {
122   kSuspendRequest   = 1,  // If set implies that suspend_count_ > 0 and the Thread should enter the
123                           // safepoint handler.
124   kCheckpointRequest = 2,  // Request that the thread do some checkpoint work and then continue.
125   kEmptyCheckpointRequest = 4,  // Request that the thread do empty checkpoint and then continue.
126   kActiveSuspendBarrier = 8,  // Register that at least 1 suspend barrier needs to be passed.
127 };
128 
129 enum class StackedShadowFrameType {
130   kShadowFrameUnderConstruction,
131   kDeoptimizationShadowFrame,
132 };
133 
134 // The type of method that triggers deoptimization. It contains info on whether
135 // the deoptimized method should advance dex_pc.
136 enum class DeoptimizationMethodType {
137   kKeepDexPc,  // dex pc is required to be kept upon deoptimization.
138   kDefault     // dex pc may or may not advance depending on other conditions.
139 };
140 
141 // This should match RosAlloc::kNumThreadLocalSizeBrackets.
142 static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
143 
144 // Thread's stack layout for implicit stack overflow checks:
145 //
146 //   +---------------------+  <- highest address of stack memory
147 //   |                     |
148 //   .                     .  <- SP
149 //   |                     |
150 //   |                     |
151 //   +---------------------+  <- stack_end
152 //   |                     |
153 //   |  Gap                |
154 //   |                     |
155 //   +---------------------+  <- stack_begin
156 //   |                     |
157 //   | Protected region    |
158 //   |                     |
159 //   +---------------------+  <- lowest address of stack memory
160 //
161 // The stack always grows down in memory.  At the lowest address is a region of memory
162 // that is set mprotect(PROT_NONE).  Any attempt to read/write to this region will
163 // result in a segmentation fault signal.  At any point, the thread's SP will be somewhere
164 // between the stack_end and the highest address in stack memory.  An implicit stack
165 // overflow check is a read of memory at a certain offset below the current SP (4K typically).
166 // If the thread's SP is below the stack_end address this will be a read into the protected
167 // region.  If the SP is above the stack_end address, the thread is guaranteed to have
168 // at least 4K of space.  Because stack overflow checks are only performed in generated code,
169 // if the thread makes a call out to a native function (through JNI), that native function
170 // might only have 4K of memory (if the SP is adjacent to stack_end).
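// As an illustrative sketch (not the code the compiler actually emits), an implicit stack
// overflow check is conceptually just a probe read a fixed distance below the current SP:
//
//   // 'GetCurrentStackPointer' is a hypothetical helper used only for this illustration.
//   uint8_t* sp = GetCurrentStackPointer();
//   volatile uint8_t probe = *(sp - kStackOverflowImplicitCheckSize);
//   (void) probe;  // Faults with SIGSEGV if SP has already dipped below stack_end, i.e. the
//                  // read lands in the protected region; the fault handler then raises a
//                  // StackOverflowError.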
171 
172 class Thread {
173  public:
174   static const size_t kStackOverflowImplicitCheckSize;
175   static constexpr bool kVerifyStack = kIsDebugBuild;
176 
177   // Creates a new native thread corresponding to the given managed peer.
178   // Used to implement Thread.start.
179   static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
180 
181   // Attaches the calling native thread to the runtime, returning the new native peer.
182   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
183   static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
184                         bool create_peer);
185   // Attaches the calling native thread to the runtime, returning the new native peer.
186   static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_peer);
187 
188   // Reset internal state of child thread after fork.
189   void InitAfterFork();
190 
191   // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
192   // high cost and so we favor passing self around when possible.
193   // TODO: mark as PURE so the compiler may coalesce and remove?
194   static Thread* Current();
195 
196   // On a runnable thread, check for pending thread suspension request and handle if pending.
197   void AllowThreadSuspension() REQUIRES_SHARED(Locks::mutator_lock_);
198 
199   // Process pending thread suspension request and handle if pending.
200   void CheckSuspend() REQUIRES_SHARED(Locks::mutator_lock_);
201 
202   // Process a pending empty checkpoint if pending.
203   void CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex);
204   void CheckEmptyCheckpointFromMutex();
205 
206   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
207                                    ObjPtr<mirror::Object> thread_peer)
208       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
209       REQUIRES_SHARED(Locks::mutator_lock_);
210   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
211       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
212       REQUIRES_SHARED(Locks::mutator_lock_);
213 
214   // Translates offset 172 to pAllocArrayFromCode and so on.
215   template<PointerSize size_of_pointers>
216   static void DumpThreadOffset(std::ostream& os, uint32_t offset);
217 
218   // Dumps a one-line summary of thread state (used for operator<<).
219   void ShortDump(std::ostream& os) const;
220 
221   // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
222   void Dump(std::ostream& os,
223             bool dump_native_stack = true,
224             BacktraceMap* backtrace_map = nullptr,
225             bool force_dump_stack = false) const
226       REQUIRES_SHARED(Locks::mutator_lock_);
227 
228   void DumpJavaStack(std::ostream& os,
229                      bool check_suspended = true,
230                      bool dump_locks = true) const
231       REQUIRES_SHARED(Locks::mutator_lock_);
232 
233   // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
234   // case we use 'tid' to identify the thread, and we'll include as much information as we can.
235   static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
236       REQUIRES_SHARED(Locks::mutator_lock_);
237 
238   ThreadState GetState() const {
239     DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
240     DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
241     return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
242   }
243 
244   ThreadState SetState(ThreadState new_state);
245 
246   int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
247     return tls32_.suspend_count;
248   }
249 
250   int GetUserCodeSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_,
251                                                Locks::user_code_suspension_lock_) {
252     return tls32_.user_code_suspend_count;
253   }
254 
255   bool IsSuspended() const {
256     union StateAndFlags state_and_flags;
257     state_and_flags.as_int = tls32_.state_and_flags.as_atomic_int.load(std::memory_order_relaxed);
258     return state_and_flags.as_struct.state != kRunnable &&
259         (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
260   }
261 
262   void DecrDefineClassCount() {
263     tls32_.define_class_counter--;
264   }
265 
266   void IncrDefineClassCount() {
267     tls32_.define_class_counter++;
268   }
269   uint32_t GetDefineClassCount() const {
270     return tls32_.define_class_counter;
271   }
272 
273   // If delta > 0 and (this != self or suspend_barrier is not null), this function may temporarily
274   // release thread_suspend_count_lock_ internally.
275   ALWAYS_INLINE
276   bool ModifySuspendCount(Thread* self,
277                           int delta,
278                           AtomicInteger* suspend_barrier,
279                           SuspendReason reason)
280       WARN_UNUSED
281       REQUIRES(Locks::thread_suspend_count_lock_);
282 
283   // Requests a checkpoint closure to run on another thread. The closure will be run when the
284   // thread notices the request, either in an explicit runtime CheckSuspend() call, or in a call
285   // originating from a compiler generated suspend point check. This returns true if the closure
286   // was added and will (eventually) be executed. It returns false otherwise.
287   //
288   // Since multiple closures can be queued and some closures can delay other threads from running,
289   // no closure should attempt to suspend another thread while running.
290   // TODO We should add some debug option that verifies this.
291   //
292   // This guarantees that the RequestCheckpoint invocation happens-before the function invocation:
293   // RequestCheckpointFunction holds thread_suspend_count_lock_, and RunCheckpointFunction
294   // acquires it.
295   bool RequestCheckpoint(Closure* function)
296       REQUIRES(Locks::thread_suspend_count_lock_);
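  // A minimal usage sketch (illustrative only; 'target' and the closure type are hypothetical,
  // while Closure, MutexLock and the lock are the runtime's existing names):
  //
  //   class InspectThreadClosure : public Closure {
  //    public:
  //     void Run(Thread* thread) override {
  //       // Inspect 'thread' here; must not attempt to suspend other threads.
  //     }
  //   };
  //
  //   InspectThreadClosure closure;
  //   MutexLock mu(self, *Locks::thread_suspend_count_lock_);
  //   if (!target->RequestCheckpoint(&closure)) {
  //     // 'target' was not runnable; fall back to suspending it or retrying.
  //   }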
297 
298   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This is
299   // due to the fact that Thread::Current() needs to go to sleep to allow the targeted thread to
300   // execute the checkpoint for us if it is Runnable. The suspend_state is the state that the thread
301   // will go into while it is awaiting the checkpoint to be run.
302   // NB Passing ThreadState::kRunnable may cause the current thread to wait in a condition variable
303   // while holding the mutator_lock_.  Callers should ensure that this will not cause any problems
304   // for the closure or the rest of the system.
305   // NB Since multiple closures can be queued and some closures can delay other threads from running,
306   // no closure should attempt to suspend another thread while running.
307   bool RequestSynchronousCheckpoint(Closure* function,
308                                     ThreadState suspend_state = ThreadState::kWaiting)
309       REQUIRES_SHARED(Locks::mutator_lock_)
310       RELEASE(Locks::thread_list_lock_)
311       REQUIRES(!Locks::thread_suspend_count_lock_);
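  // Illustrative call sequence (a sketch; 'target' and 'closure' are hypothetical): the caller
  // enters with thread_list_lock_ held and must not unlock it afterwards, because the call
  // itself releases it:
  //
  //   Locks::thread_list_lock_->ExclusiveLock(self);
  //   bool ran = target->RequestSynchronousCheckpoint(&closure);
  //   // thread_list_lock_ has already been released at this point.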
312 
313   bool RequestEmptyCheckpoint()
314       REQUIRES(Locks::thread_suspend_count_lock_);
315 
316   void SetFlipFunction(Closure* function);
317   Closure* GetFlipFunction();
318 
319   gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
320     CHECK(kUseReadBarrier);
321     return tlsPtr_.thread_local_mark_stack;
322   }
323   void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
324     CHECK(kUseReadBarrier);
325     tlsPtr_.thread_local_mark_stack = stack;
326   }
327 
328   // Called when the thread detects that thread_suspend_count_ is non-zero. Gives up its share of
329   // the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
330   void FullSuspendCheck()
331       REQUIRES(!Locks::thread_suspend_count_lock_)
332       REQUIRES_SHARED(Locks::mutator_lock_);
333 
334   // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
335   ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable()
336       REQUIRES(!Locks::thread_suspend_count_lock_)
337       SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
338 
339   // Transition from runnable into a state where mutator privileges are denied. Releases share of
340   // mutator lock.
341   ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
342       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
343       UNLOCK_FUNCTION(Locks::mutator_lock_);
344 
345   // Once called thread suspension will cause an assertion failure.
346   const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
347     Roles::uninterruptible_.Acquire();  // No-op.
348     if (kIsDebugBuild) {
349       CHECK(cause != nullptr);
350       const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
351       tls32_.no_thread_suspension++;
352       tlsPtr_.last_no_thread_suspension_cause = cause;
353       return previous_cause;
354     } else {
355       return nullptr;
356     }
357   }
358 
359   // End region where no thread suspension is expected.
360   void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) {
361     if (kIsDebugBuild) {
362       CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
363       CHECK_GT(tls32_.no_thread_suspension, 0U);
364       tls32_.no_thread_suspension--;
365       tlsPtr_.last_no_thread_suspension_cause = old_cause;
366     }
367     Roles::uninterruptible_.Release();  // No-op.
368   }
369 
370   // End region where no thread suspension is expected. Returns the current open region in case we
371   // want to reopen it. Used for ScopedAllowThreadSuspension. Not supported if no_thread_suspension
372   // is larger than one.
373   const char* EndAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) WARN_UNUSED {
374     const char* ret = nullptr;
375     if (kIsDebugBuild) {
376       CHECK_EQ(tls32_.no_thread_suspension, 1u);
377       tls32_.no_thread_suspension--;
378       ret = tlsPtr_.last_no_thread_suspension_cause;
379       tlsPtr_.last_no_thread_suspension_cause = nullptr;
380     }
381     Roles::uninterruptible_.Release();  // No-op.
382     return ret;
383   }
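  // Typical pairing, shown as a sketch (ScopedAssertNoThreadSuspension wraps this pattern):
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Visiting thread roots");
  //   ... code that must not suspend: no allocation, no waiting on suspending locks ...
  //   self->EndAssertNoThreadSuspension(old_cause);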
384 
385   void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
386 
387   // Return true if thread suspension is allowable.
388   bool IsThreadSuspensionAllowable() const;
389 
390   bool IsDaemon() const {
391     return tls32_.daemon;
392   }
393 
394   size_t NumberOfHeldMutexes() const;
395 
396   bool HoldsLock(ObjPtr<mirror::Object> object) const REQUIRES_SHARED(Locks::mutator_lock_);
397 
398   /*
399    * Changes the priority of this thread to match that of the java.lang.Thread object.
400    *
401    * We map a priority value from 1-10 to Linux "nice" values, where lower
402    * numbers indicate higher priority.
403    */
404   void SetNativePriority(int newPriority);
405 
406   /*
407    * Returns the priority of this thread by querying the system.
408    * This is useful when attaching a thread through JNI.
409    *
410    * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
411    */
412   int GetNativePriority() const;
413 
414   // Guaranteed to be non-zero.
415   uint32_t GetThreadId() const {
416     return tls32_.thin_lock_thread_id;
417   }
418 
419   pid_t GetTid() const {
420     return tls32_.tid;
421   }
422 
423   // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
424   ObjPtr<mirror::String> GetThreadName() const REQUIRES_SHARED(Locks::mutator_lock_);
425 
426   // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
427   // allocation, or locking.
428   void GetThreadName(std::string& name) const;
429 
430   // Sets the thread's name.
431   void SetThreadName(const char* name) REQUIRES_SHARED(Locks::mutator_lock_);
432 
433   // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
434   uint64_t GetCpuMicroTime() const;
435 
436   mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
437     DCHECK(Thread::Current() == this) << "Use GetPeerFromOtherThread instead";
438     CHECK(tlsPtr_.jpeer == nullptr);
439     return tlsPtr_.opeer;
440   }
441   // GetPeer is not safe if called on another thread in the middle of the CC thread flip; the
442   // thread's stack may not have been flipped yet, so the peer may be a from-space (stale) ref.
443   // This function will explicitly mark/forward it.
444   mirror::Object* GetPeerFromOtherThread() const REQUIRES_SHARED(Locks::mutator_lock_);
445 
446   bool HasPeer() const {
447     return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
448   }
449 
450   RuntimeStats* GetStats() {
451     return &tls64_.stats;
452   }
453 
454   bool IsStillStarting() const;
455 
456   bool IsExceptionPending() const {
457     return tlsPtr_.exception != nullptr;
458   }
459 
460   bool IsAsyncExceptionPending() const {
461     return tlsPtr_.async_exception != nullptr;
462   }
463 
464   mirror::Throwable* GetException() const REQUIRES_SHARED(Locks::mutator_lock_) {
465     return tlsPtr_.exception;
466   }
467 
468   void AssertPendingException() const;
469   void AssertPendingOOMException() const REQUIRES_SHARED(Locks::mutator_lock_);
470   void AssertNoPendingException() const;
471   void AssertNoPendingExceptionForNewException(const char* msg) const;
472 
473   void SetException(ObjPtr<mirror::Throwable> new_exception) REQUIRES_SHARED(Locks::mutator_lock_);
474 
475   // Set an exception that is asynchronously thrown from a different thread. This will be checked
476   // periodically and might overwrite the current 'Exception'. This can only be called from a
477   // checkpoint.
478   //
479   // The caller should also make sure that the thread has been deoptimized so that the exception
480   // could be detected on back-edges.
481   void SetAsyncException(ObjPtr<mirror::Throwable> new_exception)
482       REQUIRES_SHARED(Locks::mutator_lock_);
483 
484   void ClearException() REQUIRES_SHARED(Locks::mutator_lock_) {
485     tlsPtr_.exception = nullptr;
486   }
487 
488   // Move the current async-exception to the main exception. This should be called when the current
489   // thread is ready to deal with any async exceptions. Returns true if there is an async exception
490   // that needs to be dealt with, false otherwise.
491   bool ObserveAsyncException() REQUIRES_SHARED(Locks::mutator_lock_);
492 
493   // Find the catch block and perform a long jump to the appropriate exception handler.
494   NO_RETURN void QuickDeliverException() REQUIRES_SHARED(Locks::mutator_lock_);
495 
496   Context* GetLongJumpContext();
497   void ReleaseLongJumpContext(Context* context) {
498     if (tlsPtr_.long_jump_context != nullptr) {
499       ReleaseLongJumpContextInternal();
500     }
501     tlsPtr_.long_jump_context = context;
502   }
503 
504   // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
505   // abort the runtime iff abort_on_error is true.
506   ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
507                               bool check_suspended = true,
508                               bool abort_on_error = true) const
509       REQUIRES_SHARED(Locks::mutator_lock_);
510 
511   // Returns whether the given exception was thrown by the current Java method being executed
512   // (Note that this includes native Java methods).
513   bool IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const
514       REQUIRES_SHARED(Locks::mutator_lock_);
515 
516   void SetTopOfStack(ArtMethod** top_method) {
517     tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
518   }
519 
520   void SetTopOfStackTagged(ArtMethod** top_method) {
521     tlsPtr_.managed_stack.SetTopQuickFrameTagged(top_method);
522   }
523 
524   void SetTopOfShadowStack(ShadowFrame* top) {
525     tlsPtr_.managed_stack.SetTopShadowFrame(top);
526   }
527 
528   bool HasManagedStack() const {
529     return tlsPtr_.managed_stack.HasTopQuickFrame() || tlsPtr_.managed_stack.HasTopShadowFrame();
530   }
531 
532   // If 'msg' is null, no detail message is set.
533   void ThrowNewException(const char* exception_class_descriptor, const char* msg)
534       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
535 
536   // If 'msg' is null, no detail message is set. An exception must be pending, and will be
537   // used as the new exception's cause.
538   void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
539       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
540 
541   void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
542       __attribute__((format(printf, 3, 4)))
543       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
544 
545   void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
546       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
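  // For example (illustrative only; 'state' is a hypothetical local):
  //
  //   self->ThrowNewExceptionF("Ljava/lang/IllegalStateException;",
  //                            "Unexpected thread state %d", state);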
547 
548   // OutOfMemoryError is special, because we need to pre-allocate an instance.
549   // Only the GC should call this.
550   void ThrowOutOfMemoryError(const char* msg) REQUIRES_SHARED(Locks::mutator_lock_)
551       REQUIRES(!Roles::uninterruptible_);
552 
553   static void Startup();
554   static void FinishStartup();
555   static void Shutdown();
556 
557   // Notify this thread's thread-group that this thread has started.
558   // Note: the given thread-group is used as a fast path and verified in debug build. If the value
559   //       is null, the thread's thread-group is loaded from the peer.
560   void NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group = nullptr)
561       REQUIRES_SHARED(Locks::mutator_lock_);
562 
563   // JNI methods
564   JNIEnvExt* GetJniEnv() const {
565     return tlsPtr_.jni_env;
566   }
567 
568   // Convert a jobject into an Object*
569   ObjPtr<mirror::Object> DecodeJObject(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
570   // Checks if the weak global ref has been cleared by the GC without decoding it.
571   bool IsJWeakCleared(jweak obj) const REQUIRES_SHARED(Locks::mutator_lock_);
572 
573   mirror::Object* GetMonitorEnterObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
574     return tlsPtr_.monitor_enter_object;
575   }
576 
577   void SetMonitorEnterObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
578     tlsPtr_.monitor_enter_object = obj;
579   }
580 
581   // Implements java.lang.Thread.interrupted.
582   bool Interrupted();
583   // Implements java.lang.Thread.isInterrupted.
584   bool IsInterrupted();
585   void Interrupt(Thread* self) REQUIRES(!wait_mutex_);
586   void SetInterrupted(bool i) {
587     tls32_.interrupted.store(i, std::memory_order_seq_cst);
588   }
589   void Notify() REQUIRES(!wait_mutex_);
590 
591   ALWAYS_INLINE void PoisonObjectPointers() {
592     ++poison_object_cookie_;
593   }
594 
595   ALWAYS_INLINE static void PoisonObjectPointersIfDebug();
596 
597   ALWAYS_INLINE uintptr_t GetPoisonObjectCookie() const {
598     return poison_object_cookie_;
599   }
600 
601   // Parking for 0ns of relative time means an untimed park; a negative timeout (though it
602   // should be handled in Java code) returns immediately.
603   void Park(bool is_absolute, int64_t time) REQUIRES_SHARED(Locks::mutator_lock_);
604   void Unpark();
605 
606  private:
607   void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
608 
609  public:
610   Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
611     return wait_mutex_;
612   }
613 
614   ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) {
615     return wait_cond_;
616   }
617 
618   Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) {
619     return wait_monitor_;
620   }
621 
622   void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) {
623     wait_monitor_ = mon;
624   }
625 
626   // Waiter linked-list support.
627   Thread* GetWaitNext() const {
628     return tlsPtr_.wait_next;
629   }
630 
631   void SetWaitNext(Thread* next) {
632     tlsPtr_.wait_next = next;
633   }
634 
635   jobject GetClassLoaderOverride() {
636     return tlsPtr_.class_loader_override;
637   }
638 
639   void SetClassLoaderOverride(jobject class_loader_override);
640 
641   // Create the internal representation of a stack trace, that is more time
642   // and space efficient to compute than the StackTraceElement[].
643   jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
644       REQUIRES_SHARED(Locks::mutator_lock_);
645 
646   // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
647   // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
648   // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
649   // with the number of valid frames in the returned array.
650   static jobjectArray InternalStackTraceToStackTraceElementArray(
651       const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
652       jobjectArray output_array = nullptr, int* stack_depth = nullptr)
653       REQUIRES_SHARED(Locks::mutator_lock_);
654 
655   jobjectArray CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
656       REQUIRES_SHARED(Locks::mutator_lock_);
657 
658   bool HasDebuggerShadowFrames() const {
659     return tlsPtr_.frame_id_to_shadow_frame != nullptr;
660   }
661 
662   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
663       REQUIRES_SHARED(Locks::mutator_lock_);
664 
665   void VisitReflectiveTargets(ReflectiveValueVisitor* visitor)
666       REQUIRES(Locks::mutator_lock_);
667 
668   void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_) {
669     if (kVerifyStack) {
670       VerifyStackImpl();
671     }
672   }
673 
674   //
675   // Offsets of various members of native Thread class, used by compiled code.
676   //
677 
678   template<PointerSize pointer_size>
679   static constexpr ThreadOffset<pointer_size> ThinLockIdOffset() {
680     return ThreadOffset<pointer_size>(
681         OFFSETOF_MEMBER(Thread, tls32_) +
682         OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
683   }
684 
685   template<PointerSize pointer_size>
686   static constexpr ThreadOffset<pointer_size> InterruptedOffset() {
687     return ThreadOffset<pointer_size>(
688         OFFSETOF_MEMBER(Thread, tls32_) +
689         OFFSETOF_MEMBER(tls_32bit_sized_values, interrupted));
690   }
691 
692   template<PointerSize pointer_size>
693   static constexpr ThreadOffset<pointer_size> WeakRefAccessEnabledOffset() {
694     return ThreadOffset<pointer_size>(
695         OFFSETOF_MEMBER(Thread, tls32_) +
696         OFFSETOF_MEMBER(tls_32bit_sized_values, weak_ref_access_enabled));
697   }
698 
699   template<PointerSize pointer_size>
700   static constexpr ThreadOffset<pointer_size> ThreadFlagsOffset() {
701     return ThreadOffset<pointer_size>(
702         OFFSETOF_MEMBER(Thread, tls32_) +
703         OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
704   }
705 
706   template<PointerSize pointer_size>
707   static constexpr ThreadOffset<pointer_size> UseMterpOffset() {
708     return ThreadOffset<pointer_size>(
709         OFFSETOF_MEMBER(Thread, tls32_) +
710         OFFSETOF_MEMBER(tls_32bit_sized_values, use_mterp));
711   }
712 
713   template<PointerSize pointer_size>
714   static constexpr ThreadOffset<pointer_size> IsGcMarkingOffset() {
715     return ThreadOffset<pointer_size>(
716         OFFSETOF_MEMBER(Thread, tls32_) +
717         OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
718   }
719 
720   static constexpr size_t IsGcMarkingSize() {
721     return sizeof(tls32_.is_gc_marking);
722   }
723 
724   // Deoptimize the Java stack.
725   void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
726 
727  private:
728   template<PointerSize pointer_size>
729   static constexpr ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
730     size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
731     size_t scale = (pointer_size > kRuntimePointerSize) ?
732       static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize) : 1;
733     size_t shrink = (kRuntimePointerSize > pointer_size) ?
734       static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size) : 1;
735     return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
736   }
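  // Worked example: on a 64-bit runtime (kRuntimePointerSize == PointerSize::k64) with
  // pointer_size == PointerSize::k32, scale is 1 and shrink is 2, so a tls_ptr_offset of 16
  // bytes in the native layout maps to tlsPtr_ + 8 in the 32-bit view; with
  // pointer_size == PointerSize::k64 both factors are 1 and the offset is used unchanged.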
737 
738  public:
739   template<PointerSize pointer_size>
740   static constexpr ThreadOffset<pointer_size> QuickEntryPointOffset(
741       size_t quick_entrypoint_offset) {
742     return ThreadOffsetFromTlsPtr<pointer_size>(
743         OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
744   }
745 
746   static constexpr uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
747                                                           PointerSize pointer_size) {
748     if (pointer_size == PointerSize::k32) {
749       return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset).
750           Uint32Value();
751     } else {
752       return QuickEntryPointOffset<PointerSize::k64>(quick_entrypoint_offset).
753           Uint32Value();
754     }
755   }
756 
757   template<PointerSize pointer_size>
758   static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
759     return ThreadOffsetFromTlsPtr<pointer_size>(
760         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
761   }
762 
763   // Return the entry point offset integer value for ReadBarrierMarkRegX, where X is `reg`.
764   template <PointerSize pointer_size>
765   static constexpr int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) {
766     // The entry point list defines 30 ReadBarrierMarkRegX entry points.
767     DCHECK_LT(reg, 30u);
768     // The ReadBarrierMarkRegX entry points are ordered by increasing
769     // register number in Thread::tlsPtr_.quick_entrypoints.
770     return QUICK_ENTRYPOINT_OFFSET(pointer_size, pReadBarrierMarkReg00).Int32Value()
771         + static_cast<size_t>(pointer_size) * reg;
772   }
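  // For example, with pointer_size == PointerSize::k64 and reg == 3 this evaluates to the
  // offset of pReadBarrierMarkReg00 plus 8 * 3, i.e. the offset of pReadBarrierMarkReg03.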
773 
774   template<PointerSize pointer_size>
775   static constexpr ThreadOffset<pointer_size> SelfOffset() {
776     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
777   }
778 
779   template<PointerSize pointer_size>
780   static constexpr ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
781     return ThreadOffsetFromTlsPtr<pointer_size>(
782         OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase));
783   }
784 
785   template<PointerSize pointer_size>
786   static constexpr ThreadOffset<pointer_size> ExceptionOffset() {
787     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
788   }
789 
790   template<PointerSize pointer_size>
791   static constexpr ThreadOffset<pointer_size> PeerOffset() {
792     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
793   }
794 
795 
796   template<PointerSize pointer_size>
797   static constexpr ThreadOffset<pointer_size> CardTableOffset() {
798     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
799   }
800 
801   template<PointerSize pointer_size>
802   static constexpr ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
803     return ThreadOffsetFromTlsPtr<pointer_size>(
804         OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
805   }
806 
807   template<PointerSize pointer_size>
808   static constexpr ThreadOffset<pointer_size> ThreadLocalPosOffset() {
809     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
810                                                                 thread_local_pos));
811   }
812 
813   template<PointerSize pointer_size>
814   static constexpr ThreadOffset<pointer_size> ThreadLocalEndOffset() {
815     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
816                                                                 thread_local_end));
817   }
818 
819   template<PointerSize pointer_size>
820   static constexpr ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
821     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
822                                                                 thread_local_objects));
823   }
824 
825   template<PointerSize pointer_size>
826   static constexpr ThreadOffset<pointer_size> RosAllocRunsOffset() {
827     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
828                                                                 rosalloc_runs));
829   }
830 
831   template<PointerSize pointer_size>
832   static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
833     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
834                                                                 thread_local_alloc_stack_top));
835   }
836 
837   template<PointerSize pointer_size>
838   static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
839     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
840                                                                 thread_local_alloc_stack_end));
841   }
842 
843   // Size of stack less any space reserved for stack overflow
844   size_t GetStackSize() const {
845     return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
846   }
847 
848   ALWAYS_INLINE uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const;
849 
850   uint8_t* GetStackEnd() const {
851     return tlsPtr_.stack_end;
852   }
853 
854   // Set the stack end to that to be used during a stack overflow
855   void SetStackEndForStackOverflow() REQUIRES_SHARED(Locks::mutator_lock_);
856 
857   // Set the stack end to that to be used during regular execution
858   ALWAYS_INLINE void ResetDefaultStackEnd();
859 
860   bool IsHandlingStackOverflow() const {
861     return tlsPtr_.stack_end == tlsPtr_.stack_begin;
862   }
863 
864   template<PointerSize pointer_size>
865   static constexpr ThreadOffset<pointer_size> StackEndOffset() {
866     return ThreadOffsetFromTlsPtr<pointer_size>(
867         OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
868   }
869 
870   template<PointerSize pointer_size>
871   static constexpr ThreadOffset<pointer_size> JniEnvOffset() {
872     return ThreadOffsetFromTlsPtr<pointer_size>(
873         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
874   }
875 
876   template<PointerSize pointer_size>
877   static constexpr ThreadOffset<pointer_size> TopOfManagedStackOffset() {
878     return ThreadOffsetFromTlsPtr<pointer_size>(
879         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
880         ManagedStack::TaggedTopQuickFrameOffset());
881   }
882 
883   const ManagedStack* GetManagedStack() const {
884     return &tlsPtr_.managed_stack;
885   }
886 
887   // Linked list recording fragments of managed stack.
888   void PushManagedStackFragment(ManagedStack* fragment) {
889     tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
890   }
891   void PopManagedStackFragment(const ManagedStack& fragment) {
892     tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
893   }
894 
895   ALWAYS_INLINE ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame);
896   ALWAYS_INLINE ShadowFrame* PopShadowFrame();
897 
898   template<PointerSize pointer_size>
899   static constexpr ThreadOffset<pointer_size> TopShadowFrameOffset() {
900     return ThreadOffsetFromTlsPtr<pointer_size>(
901         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
902         ManagedStack::TopShadowFrameOffset());
903   }
904 
905   // Is the given obj in one of this thread's JNI transition frames?
906   bool IsJniTransitionReference(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
907 
908   void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
909       REQUIRES_SHARED(Locks::mutator_lock_);
910 
911   BaseHandleScope* GetTopHandleScope() REQUIRES_SHARED(Locks::mutator_lock_) {
912     return tlsPtr_.top_handle_scope;
913   }
914 
915   void PushHandleScope(BaseHandleScope* handle_scope) REQUIRES_SHARED(Locks::mutator_lock_) {
916     DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
917     tlsPtr_.top_handle_scope = handle_scope;
918   }
919 
920   BaseHandleScope* PopHandleScope() REQUIRES_SHARED(Locks::mutator_lock_) {
921     BaseHandleScope* handle_scope = tlsPtr_.top_handle_scope;
922     DCHECK(handle_scope != nullptr);
923     tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
924     return handle_scope;
925   }
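  // These are normally driven by the scoped helpers rather than called directly; a minimal
  // sketch using StackHandleScope (declared in handle_scope.h, included above):
  //
  //   StackHandleScope<1> hs(self);                  // links itself via PushHandleScope()
  //   Handle<mirror::Object> h = hs.NewHandle(obj);  // 'obj' is a hypothetical ObjPtr
  //   // 'h' stays valid across suspend points; ~StackHandleScope() calls PopHandleScope().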
926 
927   template<PointerSize pointer_size>
928   static constexpr ThreadOffset<pointer_size> TopHandleScopeOffset() {
929     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
930                                                                 top_handle_scope));
931   }
932 
933   BaseReflectiveHandleScope* GetTopReflectiveHandleScope() {
934     return tlsPtr_.top_reflective_handle_scope;
935   }
936 
937   void PushReflectiveHandleScope(BaseReflectiveHandleScope* scope) {
938     DCHECK_EQ(scope->GetLink(), tlsPtr_.top_reflective_handle_scope);
939     DCHECK_EQ(scope->GetThread(), this);
940     tlsPtr_.top_reflective_handle_scope = scope;
941   }
942 
943   BaseReflectiveHandleScope* PopReflectiveHandleScope() {
944     BaseReflectiveHandleScope* handle_scope = tlsPtr_.top_reflective_handle_scope;
945     DCHECK(handle_scope != nullptr);
946     tlsPtr_.top_reflective_handle_scope = tlsPtr_.top_reflective_handle_scope->GetLink();
947     return handle_scope;
948   }
949 
950   // Indicates whether this thread is ready to invoke a method for debugging. This
951   // is only true if the thread has been suspended by a debug event.
952   bool IsReadyForDebugInvoke() const {
953     return tls32_.ready_for_debug_invoke;
954   }
955 
956   void SetReadyForDebugInvoke(bool ready) {
957     tls32_.ready_for_debug_invoke = ready;
958   }
959 
960   bool IsDebugMethodEntry() const {
961     return tls32_.debug_method_entry_;
962   }
963 
964   void SetDebugMethodEntry() {
965     tls32_.debug_method_entry_ = true;
966   }
967 
968   void ClearDebugMethodEntry() {
969     tls32_.debug_method_entry_ = false;
970   }
971 
972   bool GetIsGcMarking() const {
973     CHECK(kUseReadBarrier);
974     return tls32_.is_gc_marking;
975   }
976 
977   void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking);
978 
979   bool GetWeakRefAccessEnabled() const {
980     CHECK(kUseReadBarrier);
981     return tls32_.weak_ref_access_enabled;
982   }
983 
984   void SetWeakRefAccessEnabled(bool enabled) {
985     CHECK(kUseReadBarrier);
986     tls32_.weak_ref_access_enabled = enabled;
987   }
988 
989   uint32_t GetDisableThreadFlipCount() const {
990     CHECK(kUseReadBarrier);
991     return tls32_.disable_thread_flip_count;
992   }
993 
994   void IncrementDisableThreadFlipCount() {
995     CHECK(kUseReadBarrier);
996     ++tls32_.disable_thread_flip_count;
997   }
998 
999   void DecrementDisableThreadFlipCount() {
1000     CHECK(kUseReadBarrier);
1001     DCHECK_GT(tls32_.disable_thread_flip_count, 0U);
1002     --tls32_.disable_thread_flip_count;
1003   }
1004 
1005   // Returns true if the thread is a runtime thread (e.g. from a ThreadPool).
1006   bool IsRuntimeThread() const {
1007     return is_runtime_thread_;
1008   }
1009 
1010   void SetIsRuntimeThread(bool is_runtime_thread) {
1011     is_runtime_thread_ = is_runtime_thread;
1012   }
1013 
1014   uint32_t CorePlatformApiCookie() {
1015     return core_platform_api_cookie_;
1016   }
1017 
1018   void SetCorePlatformApiCookie(uint32_t cookie) {
1019     core_platform_api_cookie_ = cookie;
1020   }
1021 
1022   // Returns true if the thread is allowed to load java classes.
1023   bool CanLoadClasses() const;
1024 
1025   // Returns the fake exception used to activate deoptimization.
1026   static mirror::Throwable* GetDeoptimizationException() {
1027     // Note that the mirror::Throwable must be aligned to kObjectAlignment or else it cannot be
1028     // represented by ObjPtr.
1029     return reinterpret_cast<mirror::Throwable*>(0x100);
1030   }
1031 
1032   // Currently deoptimization invokes verifier which can trigger class loading
1033   // and execute Java code, so there might be nested deoptimizations happening.
1034   // We need to save the ongoing deoptimization shadow frames and return
1035   // values on stacks.
1036   // 'from_code' denotes whether the deoptimization was explicitly made from
1037   // compiled code.
1038   // 'method_type' contains info on whether deoptimization should advance
1039   // dex_pc.
1040   void PushDeoptimizationContext(const JValue& return_value,
1041                                  bool is_reference,
1042                                  ObjPtr<mirror::Throwable> exception,
1043                                  bool from_code,
1044                                  DeoptimizationMethodType method_type)
1045       REQUIRES_SHARED(Locks::mutator_lock_);
1046   void PopDeoptimizationContext(JValue* result,
1047                                 ObjPtr<mirror::Throwable>* exception,
1048                                 bool* from_code,
1049                                 DeoptimizationMethodType* method_type)
1050       REQUIRES_SHARED(Locks::mutator_lock_);
1051   void AssertHasDeoptimizationContext()
1052       REQUIRES_SHARED(Locks::mutator_lock_);
1053   void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
1054   ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present = true);
1055 
1056   // For debugger, find the shadow frame that corresponds to a frame id.
1057   // Or return null if there is none.
1058   ShadowFrame* FindDebuggerShadowFrame(size_t frame_id)
1059       REQUIRES_SHARED(Locks::mutator_lock_);
1060   // For debugger, find the bool array that keeps track of the updated vreg set
1061   // for a frame id.
1062   bool* GetUpdatedVRegFlags(size_t frame_id) REQUIRES_SHARED(Locks::mutator_lock_);
1063   // For debugger, find the shadow frame that corresponds to a frame id. If
1064   // one doesn't exist yet, create one and track it in frame_id_to_shadow_frame.
1065   ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id,
1066                                                uint32_t num_vregs,
1067                                                ArtMethod* method,
1068                                                uint32_t dex_pc)
1069       REQUIRES_SHARED(Locks::mutator_lock_);
1070 
1071   // Delete the entry that maps from frame_id to shadow_frame.
1072   void RemoveDebuggerShadowFrameMapping(size_t frame_id)
1073       REQUIRES_SHARED(Locks::mutator_lock_);
1074 
1075   // While getting this map requires shared the mutator lock, manipulating it
1076   // should actually follow these rules:
1077   // (1) The owner of this map (the thread) can change it with its mutator lock.
1078   // (2) Other threads can read this map when the owner is suspended and they
1079   //     hold the mutator lock.
1080   // (3) Other threads can change this map when owning the mutator lock exclusively.
1081   //
1082   // The reason why (3) needs the mutator lock exclusively (and not just having
1083   // the owner suspended) is that we don't want other threads to concurrently read the map.
1084   //
1085   // TODO: Add a class abstraction to express these rules.
1086   std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* GetInstrumentationStack()
1087       REQUIRES_SHARED(Locks::mutator_lock_) {
1088     return tlsPtr_.instrumentation_stack;
1089   }
1090 
1091   std::vector<ArtMethod*>* GetStackTraceSample() const {
1092     DCHECK(!IsAotCompiler());
1093     return tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
1094   }
1095 
1096   void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
1097     DCHECK(!IsAotCompiler());
1098     tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample = sample;
1099   }
1100 
1101   verifier::VerifierDeps* GetVerifierDeps() const {
1102     DCHECK(IsAotCompiler());
1103     return tlsPtr_.deps_or_stack_trace_sample.verifier_deps;
1104   }
1105 
1106   // It is the responsibility of the caller to make sure the verifier_deps
1107   // entry in the thread is cleared before destruction of the actual VerifierDeps
1108   // object, or the thread.
1109   void SetVerifierDeps(verifier::VerifierDeps* verifier_deps) {
1110     DCHECK(IsAotCompiler());
1111     DCHECK(verifier_deps == nullptr || tlsPtr_.deps_or_stack_trace_sample.verifier_deps == nullptr);
1112     tlsPtr_.deps_or_stack_trace_sample.verifier_deps = verifier_deps;
1113   }
1114 
1115   uint64_t GetTraceClockBase() const {
1116     return tls64_.trace_clock_base;
1117   }
1118 
1119   void SetTraceClockBase(uint64_t clock_base) {
1120     tls64_.trace_clock_base = clock_base;
1121   }
1122 
1123   BaseMutex* GetHeldMutex(LockLevel level) const {
1124     return tlsPtr_.held_mutexes[level];
1125   }
1126 
1127   void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
1128     tlsPtr_.held_mutexes[level] = mutex;
1129   }
1130 
1131   void ClearSuspendBarrier(AtomicInteger* target)
1132       REQUIRES(Locks::thread_suspend_count_lock_);
1133 
1134   bool ReadFlag(ThreadFlag flag) const {
1135     return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
1136   }
1137 
1138   bool TestAllFlags() const {
1139     return (tls32_.state_and_flags.as_struct.flags != 0);
1140   }
1141 
1142   void AtomicSetFlag(ThreadFlag flag) {
1143     tls32_.state_and_flags.as_atomic_int.fetch_or(flag, std::memory_order_seq_cst);
1144   }
1145 
1146   void AtomicClearFlag(ThreadFlag flag) {
1147     tls32_.state_and_flags.as_atomic_int.fetch_and(-1 ^ flag, std::memory_order_seq_cst);
1148   }
1149 
1150   bool UseMterp() const {
1151     return tls32_.use_mterp.load();
1152   }
1153 
1154   void ResetQuickAllocEntryPointsForThread();
1155 
1156   // Returns the remaining space in the TLAB.
1157   size_t TlabSize() const {
1158     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_pos;
1159   }
1160 
1161   // Returns pos offset from start.
1162   size_t GetTlabPosOffset() const {
1163     return tlsPtr_.thread_local_pos - tlsPtr_.thread_local_start;
1164   }
1165 
1166   // Returns the remaining space in the TLAB if we were to expand it to maximum capacity.
1167   size_t TlabRemainingCapacity() const {
1168     return tlsPtr_.thread_local_limit - tlsPtr_.thread_local_pos;
1169   }
1170 
1171   // Expand the TLAB by a fixed number of bytes. There must be enough capacity to do so.
1172   void ExpandTlab(size_t bytes) {
1173     tlsPtr_.thread_local_end += bytes;
1174     DCHECK_LE(tlsPtr_.thread_local_end, tlsPtr_.thread_local_limit);
1175   }
1176 
1177   // Doesn't check that there is room.
1178   mirror::Object* AllocTlab(size_t bytes);
1179   void SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit);
1180   bool HasTlab() const;
1181   void ResetTlab();
1182   uint8_t* GetTlabStart() {
1183     return tlsPtr_.thread_local_start;
1184   }
1185   uint8_t* GetTlabPos() {
1186     return tlsPtr_.thread_local_pos;
1187   }
1188   uint8_t* GetTlabEnd() {
1189     return tlsPtr_.thread_local_end;
1190   }
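  // Conceptually (a sketch, not the allocator's real fast path), a TLAB allocation bumps
  // thread_local_pos against these bounds:
  //
  //   // Hypothetical helper for illustration.
  //   mirror::Object* TryTlabAlloc(Thread* self, size_t bytes) {
  //     if (self->TlabSize() < bytes) {
  //       return nullptr;               // Caller refills the TLAB or takes the slow path.
  //     }
  //     return self->AllocTlab(bytes);  // Bumps thread_local_pos; does not check for room.
  //   }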
1191   // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
1192   // equal to a valid pointer.
1193   // TODO: does this need to be atomic?  I don't think so.
1194   void RemoveSuspendTrigger() {
1195     tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
1196   }
1197 
1198   // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
1199   // The next time a suspend check is done, it will load from the value at this address
1200   // and trigger a SIGSEGV.
1201   // Only needed if Runtime::implicit_suspend_checks_ is true and fully implemented.  It currently
1202   // is always false. Client code currently just looks at the thread flags directly to determine
1203   // whether we should suspend, so this call is currently unnecessary.
1204   void TriggerSuspend() {
1205     tlsPtr_.suspend_trigger = nullptr;
1206   }
1207 
1208 
1209   // Push an object onto the allocation stack.
1210   bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
1211       REQUIRES_SHARED(Locks::mutator_lock_);
1212 
1213   // Set the thread local allocation pointers to the given pointers.
1214   void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
1215                                      StackReference<mirror::Object>* end);
1216 
1217   // Resets the thread local allocation pointers.
1218   void RevokeThreadLocalAllocationStack();
1219 
1220   size_t GetThreadLocalBytesAllocated() const {
1221     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
1222   }
1223 
1224   size_t GetThreadLocalObjectsAllocated() const {
1225     return tlsPtr_.thread_local_objects;
1226   }
1227 
1228   void* GetRosAllocRun(size_t index) const {
1229     return tlsPtr_.rosalloc_runs[index];
1230   }
1231 
1232   void SetRosAllocRun(size_t index, void* run) {
1233     tlsPtr_.rosalloc_runs[index] = run;
1234   }
1235 
1236   bool ProtectStack(bool fatal_on_error = true);
1237   bool UnprotectStack();
1238 
1239   void SetMterpCurrentIBase(void* ibase) {
1240     tlsPtr_.mterp_current_ibase = ibase;
1241   }
1242 
1243   const void* GetMterpCurrentIBase() const {
1244     return tlsPtr_.mterp_current_ibase;
1245   }
1246 
1247   bool HandlingSignal() const {
1248     return tls32_.handling_signal_;
1249   }
1250 
1251   void SetHandlingSignal(bool handling_signal) {
1252     tls32_.handling_signal_ = handling_signal;
1253   }
1254 
1255   bool IsTransitioningToRunnable() const {
1256     return tls32_.is_transitioning_to_runnable;
1257   }
1258 
1259   void SetIsTransitioningToRunnable(bool value) {
1260     tls32_.is_transitioning_to_runnable = value;
1261   }
1262 
1263   uint32_t DecrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
1264     return --tls32_.force_interpreter_count;
1265   }
1266 
1267   uint32_t IncrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
1268     return ++tls32_.force_interpreter_count;
1269   }
1270 
1271   void SetForceInterpreterCount(uint32_t value) REQUIRES(Locks::thread_list_lock_) {
1272     tls32_.force_interpreter_count = value;
1273   }
1274 
1275   uint32_t ForceInterpreterCount() const {
1276     return tls32_.force_interpreter_count;
1277   }
1278 
1279   bool IsForceInterpreter() const {
1280     return tls32_.force_interpreter_count != 0;
1281   }
1282 
1283   bool IncrementMakeVisiblyInitializedCounter() {
1284     tls32_.make_visibly_initialized_counter += 1u;
1285     return tls32_.make_visibly_initialized_counter == kMakeVisiblyInitializedCounterTriggerCount;
1286   }
1287 
1288   void ClearMakeVisiblyInitializedCounter() {
1289     tls32_.make_visibly_initialized_counter = 0u;
1290   }
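  // Illustrative sketch only: the intended usage pattern for the counter above. The exact
  // ClassLinker entry point that flushes the pending "visibly initialized" work is an
  // assumption of this sketch:
  //
  //   if (self->IncrementMakeVisiblyInitializedCounter()) {
  //     Runtime::Current()->GetClassLinker()->MakeInitializedClassesVisiblyInitialized(
  //         self, /*wait=*/ false);
  //     self->ClearMakeVisiblyInitializedCounter();
  //   }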
1291 
1292   void PushVerifier(verifier::MethodVerifier* verifier);
1293   void PopVerifier(verifier::MethodVerifier* verifier);
1294 
1295   void InitStringEntryPoints();
1296 
1297   void ModifyDebugDisallowReadBarrier(int8_t delta) {
1298     debug_disallow_read_barrier_ += delta;
1299   }
1300 
1301   uint8_t GetDebugDisallowReadBarrierCount() const {
1302     return debug_disallow_read_barrier_;
1303   }
1304 
1305   // Gets the current TLSData associated with the key or nullptr if there isn't any. Note that users
1306   // do not gain ownership of the TLSData and must synchronize with SetCustomTLS themselves to prevent
1307   // it from being deleted.
1308   TLSData* GetCustomTLS(const char* key) REQUIRES(!Locks::custom_tls_lock_);
1309 
1310   // Sets the tls entry at 'key' to data. The thread takes ownership of the TLSData. The destructor
1311   // will be run when the thread exits or when SetCustomTLS is called again with the same key.
1312   void SetCustomTLS(const char* key, TLSData* data) REQUIRES(!Locks::custom_tls_lock_);
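  // Illustrative sketch only: a plugin could cache per-thread state in the custom TLS map,
  // where MyTlsCounters is a hypothetical TLSData subclass defined by the plugin itself:
  //
  //   class MyTlsCounters : public TLSData {
  //    public:
  //     size_t events_seen = 0;
  //   };
  //
  //   Thread* self = Thread::Current();
  //   auto* counters = static_cast<MyTlsCounters*>(self->GetCustomTLS("my-plugin.counters"));
  //   if (counters == nullptr) {
  //     counters = new MyTlsCounters();
  //     self->SetCustomTLS("my-plugin.counters", counters);  // The thread takes ownership.
  //   }
  //   counters->events_seen++;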
1313 
1314   // Returns true if the current thread is the jit sensitive thread.
1315   bool IsJitSensitiveThread() const {
1316     return this == jit_sensitive_thread_;
1317   }
1318 
1319   bool IsSystemDaemon() const REQUIRES_SHARED(Locks::mutator_lock_);
1320 
1321   // Returns true if StrictMode events are traced for the current thread.
1322   static bool IsSensitiveThread() {
1323     if (is_sensitive_thread_hook_ != nullptr) {
1324       return (*is_sensitive_thread_hook_)();
1325     }
1326     return false;
1327   }
1328 
1329   // Set the read barrier marking entrypoints to be non-null.
1330   void SetReadBarrierEntrypoints();
1331 
1332   static jobject CreateCompileTimePeer(JNIEnv* env,
1333                                        const char* name,
1334                                        bool as_daemon,
1335                                        jobject thread_group)
1336       REQUIRES_SHARED(Locks::mutator_lock_);
1337 
1338   ALWAYS_INLINE InterpreterCache* GetInterpreterCache() {
1339     return &interpreter_cache_;
1340   }
1341 
1342   // Clear all thread-local interpreter caches.
1343   //
1344   // Since the caches are keyed by memory pointer to dex instructions, this must be
1345   // called when any dex code is unloaded (before different code gets loaded at the
1346   // same memory location).
1347   //
1348   // If the presence of a cache entry implies some pre-conditions, this must also be
1349   // called whenever those pre-conditions might no longer hold true.
1350   static void ClearAllInterpreterCaches();
1351 
1352   template<PointerSize pointer_size>
1353   static constexpr ThreadOffset<pointer_size> InterpreterCacheOffset() {
1354     return ThreadOffset<pointer_size>(OFFSETOF_MEMBER(Thread, interpreter_cache_));
1355   }
1356 
1357   static constexpr int InterpreterCacheSizeLog2() {
1358     return WhichPowerOf2(InterpreterCache::kSize);
1359   }
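  // Illustrative sketch only: InterpreterCacheOffset() and InterpreterCacheSizeLog2() exist so
  // that generated code can locate and index the cache from the Thread* without a runtime call.
  // The equivalent C++ lookup is roughly the following; the exact hashing of the dex PC pointer
  // is an assumption of this sketch:
  //
  //   InterpreterCache* cache = self->GetInterpreterCache();
  //   size_t index =
  //       (reinterpret_cast<uintptr_t>(dex_pc_ptr) >> 2) & (InterpreterCache::kSize - 1);
  //   // Generated code computes the slot address as
  //   //   Thread* + InterpreterCacheOffset() + index * sizeof(entry).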
1360 
1361  private:
1362   explicit Thread(bool daemon);
1363   ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
1364   void Destroy();
1365 
1366   // Deletes and clears the tlsPtr_.jpeer field. Done in such a way that instrumentation cannot
1367   // observe both it and opeer set at the same time.
1368   void DeleteJPeer(JNIEnv* env);
1369 
1370   void NotifyInTheadList()
1371       REQUIRES_SHARED(Locks::thread_list_lock_);
1372 
1373   // Attaches the calling native thread to the runtime, returning the new native peer.
1374   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
1375   template <typename PeerAction>
1376   static Thread* Attach(const char* thread_name,
1377                         bool as_daemon,
1378                         PeerAction p);
1379 
1380   void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
1381 
1382   template<bool kTransactionActive>
1383   static void InitPeer(ScopedObjectAccessAlreadyRunnable& soa,
1384                        ObjPtr<mirror::Object> peer,
1385                        jboolean thread_is_daemon,
1386                        jobject thread_group,
1387                        jobject thread_name,
1388                        jint thread_priority)
1389       REQUIRES_SHARED(Locks::mutator_lock_);
1390 
1391   // Avoid use, callers should use SetState. Used only by SignalCatcher::HandleSigQuit and ~Thread.
1392   ThreadState SetStateUnsafe(ThreadState new_state) {
1393     ThreadState old_state = GetState();
1394     if (old_state == kRunnable && new_state != kRunnable) {
1395       // Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in
1396       // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
1397       // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
1398       TransitionToSuspendedAndRunCheckpoints(new_state);
1399       // Since we transitioned to a suspended state, check for and pass any pending suspend barriers.
1400       PassActiveSuspendBarriers();
1401     } else {
1402       tls32_.state_and_flags.as_struct.state = new_state;
1403     }
1404     return old_state;
1405   }
1406 
1407   void VerifyStackImpl() REQUIRES_SHARED(Locks::mutator_lock_);
1408 
1409   void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
1410   void DumpStack(std::ostream& os,
1411                  bool dump_native_stack = true,
1412                  BacktraceMap* backtrace_map = nullptr,
1413                  bool force_dump_stack = false) const
1414       REQUIRES_SHARED(Locks::mutator_lock_);
1415 
1416   // Out-of-line conveniences for debugging in gdb.
1417   static Thread* CurrentFromGdb();  // Like Thread::Current.
1418   // Like Thread::Dump(std::cerr).
1419   void DumpFromGdb() const REQUIRES_SHARED(Locks::mutator_lock_);
1420 
1421   static void* CreateCallback(void* arg);
1422 
1423   void HandleUncaughtExceptions(ScopedObjectAccessAlreadyRunnable& soa)
1424       REQUIRES_SHARED(Locks::mutator_lock_);
1425   void RemoveFromThreadGroup(ScopedObjectAccessAlreadyRunnable& soa)
1426       REQUIRES_SHARED(Locks::mutator_lock_);
1427 
1428   // Initialize a thread.
1429   //
1430   // The third parameter is not mandatory. If given, the thread will use this JNIEnvExt. In case
1431   // Init succeeds, this means the thread takes ownership of it. If Init fails, it is the caller's
1432   // responsibility to destroy the given JNIEnvExt. If the parameter is null, Init will try to
1433   // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value
1434   // of false).
1435   bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
1436       REQUIRES(Locks::runtime_shutdown_lock_);
1437   void InitCardTable();
1438   void InitCpu();
1439   void CleanupCpu();
1440   void InitTlsEntryPoints();
1441   void InitTid();
1442   void InitPthreadKeySelf();
1443   bool InitStackHwm();
1444 
1445   void SetUpAlternateSignalStack();
1446   void TearDownAlternateSignalStack();
1447 
1448   ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state)
1449       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1450 
1451   ALWAYS_INLINE void PassActiveSuspendBarriers()
1452       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1453 
1454   // Registers the current thread as the jit sensitive thread. Should be called just once.
1455   static void SetJitSensitiveThread() {
1456     if (jit_sensitive_thread_ == nullptr) {
1457       jit_sensitive_thread_ = Thread::Current();
1458     } else {
1459       LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:"
1460           << Thread::Current()->GetTid();
1461     }
1462   }
1463 
1464   static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
1465     is_sensitive_thread_hook_ = is_sensitive_thread_hook;
1466   }
1467 
1468   bool ModifySuspendCountInternal(Thread* self,
1469                                   int delta,
1470                                   AtomicInteger* suspend_barrier,
1471                                   SuspendReason reason)
1472       WARN_UNUSED
1473       REQUIRES(Locks::thread_suspend_count_lock_);
1474 
1475   // Runs a single checkpoint function. If there are no more pending checkpoint functions it will
1476   // clear the kCheckpointRequest flag. The caller is responsible for calling this in a loop until
1477   // the kCheckpointRequest flag is cleared.
1478   void RunCheckpointFunction() REQUIRES(!Locks::thread_suspend_count_lock_);
1479   void RunEmptyCheckpoint();
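  // Illustrative sketch only: the drain loop required by the RunCheckpointFunction() contract
  // above, using the public ReadFlag() accessor (the real loop in the suspension code may
  // differ in detail):
  //
  //   while (self->ReadFlag(kCheckpointRequest)) {
  //     self->RunCheckpointFunction();
  //   }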
1480 
1481   bool PassActiveSuspendBarriers(Thread* self)
1482       REQUIRES(!Locks::thread_suspend_count_lock_);
1483 
1484   // Install the protected region for implicit stack checks.
1485   void InstallImplicitProtection();
1486 
1487   template <bool kPrecise>
1488   void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
1489 
1490   void SweepInterpreterCache(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
1491 
1492   static bool IsAotCompiler();
1493 
1494   void ReleaseLongJumpContextInternal();
1495 
1496   // 32 bits of atomically changed state and flags. Keeping them as 32 bits allows an atomic CAS to
1497   // change from being Suspended to Runnable without a suspend request occurring.
1498   union PACKED(4) StateAndFlags {
1499     StateAndFlags() {}
1500     struct PACKED(4) {
1501       // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
1502       // ThreadFlag for bit field meanings.
1503       volatile uint16_t flags;
1504       // Holds the ThreadState. May be changed non-atomically between Suspended (ie not Runnable)
1505       // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
1506       // operation. If a thread is suspended and a suspend_request is present, a thread may not
1507       // change to Runnable as a GC or other operation is in progress.
1508       volatile uint16_t state;
1509     } as_struct;
1510     AtomicInteger as_atomic_int;
1511     volatile int32_t as_int;
1512 
1513    private:
1514     // gcc does not handle struct with volatile member assignments correctly.
1515     // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
1516     DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
1517   };
1518   static_assert(sizeof(StateAndFlags) == sizeof(int32_t), "Weird state_and_flags size");
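  // Illustrative sketch only: packing state and flags into one 32-bit word lets a transition to
  // Runnable atomically check "no flags set" and install the new state in a single CAS. The real
  // code lives in Thread::TransitionFromSuspendedToRunnable(); roughly:
  //
  //   union StateAndFlags old_sf, new_sf;
  //   old_sf.as_int = tls32_.state_and_flags.as_int;
  //   if (LIKELY(old_sf.as_struct.flags == 0)) {  // No suspend or checkpoint request pending.
  //     new_sf.as_int = old_sf.as_int;
  //     new_sf.as_struct.state = kRunnable;
  //     done = tls32_.state_and_flags.as_atomic_int.CompareAndSetWeakAcquire(old_sf.as_int,
  //                                                                          new_sf.as_int);
  //   }  // Otherwise service the pending request and retry.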
1519 
1520   // Format state and flags as a hex string. For diagnostic output.
1521   std::string StateAndFlagsAsHexString() const;
1522 
1523   static void ThreadExitCallback(void* arg);
1524 
1525   // Maximum number of suspend barriers.
1526   static constexpr uint32_t kMaxSuspendBarriers = 3;
1527 
1528   // Has Thread::Startup been called?
1529   static bool is_started_;
1530 
1531   // TLS key used to retrieve the Thread*.
1532   static pthread_key_t pthread_key_self_;
1533 
1534   // Used to notify threads that they should attempt to resume; they will suspend again if
1535   // their suspend count is > 0.
1536   static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);
1537 
1538   // Hook passed by framework which returns true
1539   // when StrictMode events are traced for the current thread.
1540   static bool (*is_sensitive_thread_hook_)();
1541   // Stores the jit sensitive thread (which for now is the UI thread).
1542   static Thread* jit_sensitive_thread_;
1543 
1544   static constexpr uint32_t kMakeVisiblyInitializedCounterTriggerCount = 128;
1545 
1546   /***********************************************************************************************/
1547   // Thread local storage. Fields are grouped by size so that the 32-bit and 64-bit layouts can be
1548   // compared despite pointer size differences. To encourage shorter encoding, more frequently used
1549   // values appear first if possible.
1550   /***********************************************************************************************/
1551 
1552   struct PACKED(4) tls_32bit_sized_values {
1553     // We have no control over the size of 'bool', but want our boolean fields
1554     // to be 4-byte quantities.
1555     typedef uint32_t bool32_t;
1556 
1557     explicit tls_32bit_sized_values(bool is_daemon)
1558         : suspend_count(0),
1559           thin_lock_thread_id(0),
1560           tid(0),
1561           daemon(is_daemon),
1562           throwing_OutOfMemoryError(false),
1563           no_thread_suspension(0),
1564           thread_exit_check_count(0),
1565           handling_signal_(false),
1566           is_transitioning_to_runnable(false),
1567           ready_for_debug_invoke(false),
1568           debug_method_entry_(false),
1569           is_gc_marking(false),
1570           weak_ref_access_enabled(true),
1571           disable_thread_flip_count(0),
1572           user_code_suspend_count(0),
1573           force_interpreter_count(0),
1574           use_mterp(0),
1575           make_visibly_initialized_counter(0),
1576           define_class_counter(0) {}
1577 
1578     union StateAndFlags state_and_flags;
1579     static_assert(sizeof(union StateAndFlags) == sizeof(int32_t),
1580                   "Size of state_and_flags and int32 are different");
1581 
1582     // A non-zero value is used to tell the current thread to enter a safe point
1583     // at the next poll.
1584     int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1585 
1586     // Thin lock thread id. This is a small integer used by the thin lock implementation.
1587     // This is not to be confused with the native thread's tid, nor is it the value returned
1588     // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
1589     // important difference between this id and the ids visible to managed code is that these
1590     // ones get reused (to ensure that they fit in the number of bits available).
1591     uint32_t thin_lock_thread_id;
1592 
1593     // System thread id.
1594     uint32_t tid;
1595 
1596     // Is the thread a daemon?
1597     const bool32_t daemon;
1598 
1599     // A boolean telling us whether we're recursively throwing OOME.
1600     bool32_t throwing_OutOfMemoryError;
1601 
1602     // A positive value implies we're in a region where thread suspension isn't expected.
1603     uint32_t no_thread_suspension;
1604 
1605     // How many times has our pthread key's destructor been called?
1606     uint32_t thread_exit_check_count;
1607 
1608     // True if signal is being handled by this thread.
1609     bool32_t handling_signal_;
1610 
1611     // True if the thread is in TransitionFromSuspendedToRunnable(). This is used to distinguish the
1612     // non-runnable threads (e.g. kNative, kWaiting) that are about to transition to runnable from
1613     // the rest of them.
1614     bool32_t is_transitioning_to_runnable;
1615 
1616     // True if the thread has been suspended by a debugger event. This is
1617     // used to invoke a method from the debugger, which is only allowed when
1618     // the thread is suspended by an event.
1619     bool32_t ready_for_debug_invoke;
1620 
1621     // True if the thread enters a method. This is used to detect method entry
1622     // event for the debugger.
1623     bool32_t debug_method_entry_;
1624 
1625     // True if the GC is in the marking phase. This is used for the CC collector only. This is
1626     // thread local so that we can simplify the fast-path check for read barriers on GC roots.
1628     bool32_t is_gc_marking;
1629 
1630     // Thread "interrupted" status; stays raised until queried or thrown.
1631     Atomic<bool32_t> interrupted;
1632 
1633     AtomicInteger park_state_;
1634 
1635     // True if the thread is allowed to access a weak ref (Reference::GetReferent() and system
1636     // weaks) and to potentially mark an object alive/gray. This is used for concurrent reference
1637     // processing of the CC collector only. This is thread local so that we can enable/disable weak
1638     // ref access by using a checkpoint and avoid a race around the time weak ref access gets
1639     // disabled and concurrent reference processing begins (if weak ref access is disabled during a
1640     // pause, this is not an issue.) Other collectors use Runtime::DisallowNewSystemWeaks() and
1641     // ReferenceProcessor::EnableSlowPath().
1642     bool32_t weak_ref_access_enabled;
1643 
1644     // A thread local version of Heap::disable_thread_flip_count_. This keeps track of how many
1645     // levels of (nested) JNI critical sections the thread is in and is used to detect a nested JNI
1646     // critical section enter.
1647     uint32_t disable_thread_flip_count;
1648 
1649     // How much of 'suspend_count_' is by request of user code, used to distinguish threads
1650     // suspended by the runtime from those suspended by user code.
1651     // This should have GUARDED_BY(Locks::user_code_suspension_lock_) but auto analysis cannot be
1652     // told that AssertHeld should be good enough.
1653     int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1654 
1655     // Count of how many times this thread has been forced to the interpreter. If this is not 0,
1656     // the thread must remain in interpreted code as much as possible.
1657     uint32_t force_interpreter_count;
1658 
1659     // True if everything is in the ideal state for fast interpretation.
1660     // False if we need to switch to the C++ interpreter to handle special cases.
1661     std::atomic<bool32_t> use_mterp;
1662 
1663     // Counter for calls to initialize a class that's initialized but not visibly initialized.
1664     // When this reaches kMakeVisiblyInitializedCounterTriggerCount, we call the runtime to
1665     // make initialized classes visibly initialized. This is needed because we usually make
1666     // classes visibly initialized in batches, but we do not want a class to remain initialized
1667     // but not visibly initialized for a long time even when no further classes are being
1668     // initialized.
1669     uint32_t make_visibly_initialized_counter;
1670 
1671     // Counter for how many nested define-classes are ongoing in this thread. Used to allow waiting
1672     // for threads to be done with class-definition work.
1673     uint32_t define_class_counter;
1674   } tls32_;
1675 
1676   struct PACKED(8) tls_64bit_sized_values {
1677     tls_64bit_sized_values() : trace_clock_base(0) {
1678     }
1679 
1680     // The clock base used for tracing.
1681     uint64_t trace_clock_base;
1682 
1683     RuntimeStats stats;
1684   } tls64_;
1685 
1686   struct PACKED(sizeof(void*)) tls_ptr_sized_values {
1687       tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
1688       managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), tmp_jni_env(nullptr),
1689       self(nullptr), opeer(nullptr), jpeer(nullptr), stack_begin(nullptr), stack_size(0),
1690       deps_or_stack_trace_sample(), wait_next(nullptr), monitor_enter_object(nullptr),
1691       top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
1692       instrumentation_stack(nullptr),
1693       stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr),
1694       frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
1695       last_no_thread_suspension_cause(nullptr), checkpoint_function(nullptr),
1696       thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
1697       thread_local_limit(nullptr),
1698       thread_local_objects(0), mterp_current_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
1699       thread_local_alloc_stack_end(nullptr),
1700       flip_function(nullptr), method_verifier(nullptr), thread_local_mark_stack(nullptr),
1701       async_exception(nullptr), top_reflective_handle_scope(nullptr) {
1702       std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
1703     }
1704 
1705     // The biased card table, see CardTable for details.
1706     uint8_t* card_table;
1707 
1708     // The pending exception or null.
1709     mirror::Throwable* exception;
1710 
1711     // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
1712     // We leave extra space so there's room for the code that throws StackOverflowError.
1713     uint8_t* stack_end;
1714 
1715     // The top of the managed stack often manipulated directly by compiler generated code.
1716     ManagedStack managed_stack;
1717 
1718     // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check.  It is
1719     // normally set to the address of itself.
1720     uintptr_t* suspend_trigger;
1721 
1722     // Every thread may have an associated JNI environment
1723     JNIEnvExt* jni_env;
1724 
1725     // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the
1726     // created thread.
1727     JNIEnvExt* tmp_jni_env;
1728 
1729     // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
1730     // is easy but getting the address of Thread::Current is hard. This field can be read off of
1731     // Thread::Current to give the address.
1732     Thread* self;
1733 
1734     // Our managed peer (an instance of java.lang.Thread). The jobject version is used during
1735     // thread startup, until the thread is registered and opeer_ is used instead.
1736     mirror::Object* opeer;
1737     jobject jpeer;
1738 
1739     // The "lowest addressable byte" of the stack.
1740     uint8_t* stack_begin;
1741 
1742     // Size of the stack.
1743     size_t stack_size;
1744 
1745     // Sampling profiler and AOT verification cannot happen on the same run, so we share
1746     // the same entry for the stack trace and the verifier deps.
1747     union DepsOrStackTraceSample {
1748       DepsOrStackTraceSample() {
1749         verifier_deps = nullptr;
1750         stack_trace_sample = nullptr;
1751       }
1752       // Pointer to previous stack trace captured by sampling profiler.
1753       std::vector<ArtMethod*>* stack_trace_sample;
1754       // When doing AOT verification, per-thread VerifierDeps.
1755       verifier::VerifierDeps* verifier_deps;
1756     } deps_or_stack_trace_sample;
1757 
1758     // The next thread in the wait set this thread is part of or null if not waiting.
1759     Thread* wait_next;
1760 
1761     // If we're blocked in MonitorEnter, this is the object we're trying to lock.
1762     mirror::Object* monitor_enter_object;
1763 
1764     // Top of linked list of handle scopes or null for none.
1765     BaseHandleScope* top_handle_scope;
1766 
1767     // Needed to get the right ClassLoader in JNI_OnLoad, but also
1768     // useful for testing.
1769     jobject class_loader_override;
1770 
1771     // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
1772     Context* long_jump_context;
1773 
1774     // Additional stack used by method instrumentation to store method and return pc values.
1775     // Stored as a pointer since std::map is not PACKED.
1776     // !DO NOT CHANGE! to std::unordered_map: the users of this map require an
1777     // ordered iteration on the keys (which are stack addresses).
1778     // Also see Thread::GetInstrumentationStack for the requirements on
1779     // manipulating and reading this map.
1780     std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* instrumentation_stack;
1781 
1782     // For GC purposes, a shadow frame record stack that keeps track of:
1783     // 1) shadow frames under construction.
1784     // 2) deoptimization shadow frames.
1785     StackedShadowFrameRecord* stacked_shadow_frame_record;
1786 
1787     // Deoptimization return value record stack.
1788     DeoptimizationContextRecord* deoptimization_context_stack;
1789 
1790     // For debugger, a linked list that keeps the mapping from frame_id to shadow frame.
1791     // Shadow frames may be created before deoptimization happens so that the debugger can
1792     // set local values there first.
1793     FrameIdToShadowFrame* frame_id_to_shadow_frame;
1794 
1795     // A cached copy of the java.lang.Thread's name.
1796     std::string* name;
1797 
1798     // A cached pthread_t for the pthread underlying this Thread*.
1799     pthread_t pthread_self;
1800 
1801     // If no_thread_suspension_ is > 0, what is causing that assertion.
1802     const char* last_no_thread_suspension_cause;
1803 
1804     // Pending checkpoint function or null if none is pending. If this checkpoint is set and someone
1805     // requests another checkpoint, it goes to the checkpoint overflow list.
1806     Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);
1807 
1808     // Pending barriers that require passing or NULL if none are pending. Installation is guarded by
1809     // Locks::thread_suspend_count_lock_.
1810     // They work effectively as art::Barrier, but implemented directly using AtomicInteger and futex
1811     // to avoid additional cost of a mutex and a condition variable, as used in art::Barrier.
1812     AtomicInteger* active_suspend_barriers[kMaxSuspendBarriers];
1813 
1814     // Thread-local allocation pointer. Moved here to force alignment for thread_local_pos on ARM.
1815     uint8_t* thread_local_start;
1816 
1817     // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for
1818     // potentially better performance.
1819     uint8_t* thread_local_pos;
1820     uint8_t* thread_local_end;
1821 
1822     // Thread local limit is how far we can expand the thread local buffer; it is greater than or
1823     // equal to thread_local_end.
1824     uint8_t* thread_local_limit;
1825 
1826     size_t thread_local_objects;
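    // Illustrative sketch only: the fields above implement a bump-pointer TLAB. A fast-path
    // allocation of alloc_size bytes (the real fast path is Thread::AllocTlab plus compiler
    // generated code) behaves roughly like:
    //
    //   uint8_t* pos = thread_local_pos;
    //   if (pos + alloc_size <= thread_local_end) {
    //     thread_local_pos = pos + alloc_size;
    //     thread_local_objects++;
    //     return reinterpret_cast<mirror::Object*>(pos);
    //   }
    //   // Otherwise: try to expand up to thread_local_limit, or take the allocator slow path.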
1827 
1828     // Entrypoint function pointers.
1829     // TODO: move this to more of a global offset table model to avoid per-thread duplication.
1830     JniEntryPoints jni_entrypoints;
1831     QuickEntryPoints quick_entrypoints;
1832 
1833     // Mterp jump table base.
1834     void* mterp_current_ibase;
1835 
1836     // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
1837     void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];
1838 
1839     // Thread-local allocation stack data/routines.
1840     StackReference<mirror::Object>* thread_local_alloc_stack_top;
1841     StackReference<mirror::Object>* thread_local_alloc_stack_end;
1842 
1843     // Support for Mutex lock hierarchy bug detection.
1844     BaseMutex* held_mutexes[kLockLevelCount];
1845 
1846     // The function used for thread flip.
1847     Closure* flip_function;
1848 
1849     // Current method verifier, used for root marking.
1850     verifier::MethodVerifier* method_verifier;
1851 
1852     // Thread-local mark stack for the concurrent copying collector.
1853     gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;
1854 
1855     // The pending async-exception or null.
1856     mirror::Throwable* async_exception;
1857 
1858     // Top of the linked-list for reflective-handle scopes or null if none.
1859     BaseReflectiveHandleScope* top_reflective_handle_scope;
1860   } tlsPtr_;
1861 
1862   // Small thread-local cache to be used from the interpreter.
1863   // It is keyed by dex instruction pointer.
1864   // The value is opcode-dependent (e.g. a field offset).
1865   InterpreterCache interpreter_cache_;
1866 
1867   // No field below this line should be accessed by native code. This means these fields can be
1868   // modified, rearranged, added or removed without having to modify asm_support.h.
1869 
1870   // Guards the 'wait_monitor_' members.
1871   Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1872 
1873   // Condition variable waited upon during a wait.
1874   ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
1875   // Pointer to the monitor lock we're currently waiting on or null if not waiting.
1876   Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
1877 
1878   // Debug disallow-read-barrier count; it is only checked in debug builds and only in the runtime.
1879   uint8_t debug_disallow_read_barrier_ = 0;
1880 
1881   // Note that it is not in the packed struct; it may not be accessed during cross compilation.
1882   uintptr_t poison_object_cookie_ = 0;
1883 
1884   // Pending extra checkpoints if checkpoint_function_ is already used.
1885   std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);
1886 
1887   // Custom TLS field that can be used by plugins or the runtime. Should not be accessed directly by
1888   // compiled code or entrypoints.
1889   SafeMap<std::string, std::unique_ptr<TLSData>, std::less<>> custom_tls_
1890       GUARDED_BY(Locks::custom_tls_lock_);
1891 
1892 #ifndef __BIONIC__
1893   __attribute__((tls_model("initial-exec")))
1894   static thread_local Thread* self_tls_;
1895 #endif
1896 
1897   // True if the thread is some form of runtime thread (e.g. GC or JIT).
1898   bool is_runtime_thread_;
1899 
1900   // Set during execution of JNI methods that get field and method ids as part of determining if
1901   // the caller is allowed to access all fields and methods in the Core Platform API.
1902   uint32_t core_platform_api_cookie_ = 0;
1903 
1904   friend class gc::collector::SemiSpace;  // For getting stack traces.
1905   friend class Runtime;  // For CreatePeer.
1906   friend class QuickExceptionHandler;  // For dumping the stack.
1907   friend class ScopedThreadStateChange;
1908   friend class StubTest;  // For accessing entrypoints.
1909   friend class ThreadList;  // For ~Thread and Destroy.
1910 
1911   friend class EntrypointsOrderTest;  // To test the order of tls entries.
1912   friend class JniCompilerTest;  // For intercepting JNI entrypoint calls.
1913 
1914   DISALLOW_COPY_AND_ASSIGN(Thread);
1915 };
1916 
1917 class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
1918  public:
1919   ALWAYS_INLINE ScopedAssertNoThreadSuspension(const char* cause,
1920                                                bool enabled = true)
1921       ACQUIRE(Roles::uninterruptible_)
1922       : enabled_(enabled) {
1923     if (!enabled_) {
1924       return;
1925     }
1926     if (kIsDebugBuild) {
1927       self_ = Thread::Current();
1928       old_cause_ = self_->StartAssertNoThreadSuspension(cause);
1929     } else {
1930       Roles::uninterruptible_.Acquire();  // No-op.
1931     }
1932   }
1933   ALWAYS_INLINE ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) {
1934     if (!enabled_) {
1935       return;
1936     }
1937     if (kIsDebugBuild) {
1938       self_->EndAssertNoThreadSuspension(old_cause_);
1939     } else {
1940       Roles::uninterruptible_.Release();  // No-op.
1941     }
1942   }
1943 
1944  private:
1945   Thread* self_;
1946   const bool enabled_;
1947   const char* old_cause_;
1948 };
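// Illustrative sketch only: typical use of ScopedAssertNoThreadSuspension is to guard a region
// that manipulates raw object pointers, where ProcessRawObject is a hypothetical helper:
//
//   {
//     ScopedAssertNoThreadSuspension sants("Using raw mirror::Object*");
//     mirror::Object* raw = handle.Get();
//     ProcessRawObject(raw);
//   }  // The Roles::uninterruptible_ capability is released when sants goes out of scope.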
1949 
1950 class ScopedAllowThreadSuspension {
1951  public:
1952   ALWAYS_INLINE ScopedAllowThreadSuspension() RELEASE(Roles::uninterruptible_) {
1953     if (kIsDebugBuild) {
1954       self_ = Thread::Current();
1955       old_cause_ = self_->EndAssertNoThreadSuspension();
1956     } else {
1957       Roles::uninterruptible_.Release();  // No-op.
1958     }
1959   }
1960   ALWAYS_INLINE ~ScopedAllowThreadSuspension() ACQUIRE(Roles::uninterruptible_) {
1961     if (kIsDebugBuild) {
1962       CHECK(self_->StartAssertNoThreadSuspension(old_cause_) == nullptr);
1963     } else {
1964       Roles::uninterruptible_.Acquire();  // No-op.
1965     }
1966   }
1967 
1968  private:
1969   Thread* self_;
1970   const char* old_cause_;
1971 };
1972 
1973 
1974 class ScopedStackedShadowFramePusher {
1975  public:
1976   ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf, StackedShadowFrameType type)
1977     : self_(self), type_(type) {
1978     self_->PushStackedShadowFrame(sf, type);
1979   }
1980   ~ScopedStackedShadowFramePusher() {
1981     self_->PopStackedShadowFrame(type_);
1982   }
1983 
1984  private:
1985   Thread* const self_;
1986   const StackedShadowFrameType type_;
1987 
1988   DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
1989 };
1990 
1991 // Only has an effect in debug builds.
1992 class ScopedDebugDisallowReadBarriers {
1993  public:
1994   explicit ScopedDebugDisallowReadBarriers(Thread* self) : self_(self) {
1995     self_->ModifyDebugDisallowReadBarrier(1);
1996   }
1997   ~ScopedDebugDisallowReadBarriers() {
1998     self_->ModifyDebugDisallowReadBarrier(-1);
1999   }
2000 
2001  private:
2002   Thread* const self_;
2003 };
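// Illustrative sketch only: in debug builds the runtime consults the disallow counter when it
// performs a read barrier, so this scope can bracket code that must not trigger read barriers;
// InspectRawHeapState is a hypothetical helper:
//
//   {
//     ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
//     InspectRawHeapState();
//   }  // Read barriers are permitted again here.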
2004 
2005 class ScopedTransitioningToRunnable : public ValueObject {
2006  public:
2007   explicit ScopedTransitioningToRunnable(Thread* self)
2008       : self_(self) {
2009     DCHECK_EQ(self, Thread::Current());
2010     if (kUseReadBarrier) {
2011       self_->SetIsTransitioningToRunnable(true);
2012     }
2013   }
2014 
2015   ~ScopedTransitioningToRunnable() {
2016     if (kUseReadBarrier) {
2017       self_->SetIsTransitioningToRunnable(false);
2018     }
2019   }
2020 
2021  private:
2022   Thread* const self_;
2023 };
2024 
2025 class ThreadLifecycleCallback {
2026  public:
2027   virtual ~ThreadLifecycleCallback() {}
2028 
2029   virtual void ThreadStart(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
2030   virtual void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
2031 };
2032 
2033 // Stores the thread's pending exception and suppresses it for the lifetime of this object.
2034 class ScopedExceptionStorage {
2035  public:
2036   explicit ScopedExceptionStorage(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
2037   void SuppressOldException(const char* message = "") REQUIRES_SHARED(Locks::mutator_lock_);
2038   ~ScopedExceptionStorage() REQUIRES_SHARED(Locks::mutator_lock_);
2039 
2040  private:
2041   Thread* self_;
2042   StackHandleScope<1> hs_;
2043   MutableHandle<mirror::Throwable> excp_;
2044 };
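// Illustrative sketch only: ScopedExceptionStorage lets code that needs a clear exception state
// run while an exception is pending, and then reinstates the stored exception; RunDetachCleanup
// is a hypothetical helper:
//
//   {
//     ScopedExceptionStorage ses(self);  // The pending exception is stored and cleared.
//     RunDetachCleanup(self);            // May assume no exception is pending.
//     if (self->IsExceptionPending()) {
//       ses.SuppressOldException("Cleanup threw: ");  // Keep the new exception instead.
//     }
//   }  // Any stored exception is reinstated when ses goes out of scope.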
2045 
2046 std::ostream& operator<<(std::ostream& os, const Thread& thread);
2047 std::ostream& operator<<(std::ostream& os, StackedShadowFrameType thread);
2048 
2049 }  // namespace art
2050 
2051 #endif  // ART_RUNTIME_THREAD_H_
2052