1  /*
2   * Copyright (C) 2011 The Android Open Source Project
3   *
4   * Licensed under the Apache License, Version 2.0 (the "License");
5   * you may not use this file except in compliance with the License.
6   * You may obtain a copy of the License at
7   *
8   *      http://www.apache.org/licenses/LICENSE-2.0
9   *
10   * Unless required by applicable law or agreed to in writing, software
11   * distributed under the License is distributed on an "AS IS" BASIS,
12   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13   * See the License for the specific language governing permissions and
14   * limitations under the License.
15   */
16  
17  #ifndef ART_RUNTIME_THREAD_H_
18  #define ART_RUNTIME_THREAD_H_
19  
20  #include <atomic>
21  #include <bitset>
22  #include <deque>
23  #include <iosfwd>
24  #include <list>
25  #include <memory>
26  #include <string>
27  
28  #include "base/atomic.h"
29  #include "base/enums.h"
30  #include "base/locks.h"
31  #include "base/macros.h"
32  #include "base/safe_map.h"
33  #include "base/value_object.h"
34  #include "entrypoints/jni/jni_entrypoints.h"
35  #include "entrypoints/quick/quick_entrypoints.h"
36  #include "handle_scope.h"
37  #include "interpreter/interpreter_cache.h"
38  #include "jvalue.h"
39  #include "managed_stack.h"
40  #include "offsets.h"
41  #include "read_barrier_config.h"
42  #include "runtime_globals.h"
43  #include "runtime_stats.h"
44  #include "suspend_reason.h"
45  #include "thread_state.h"
46  
47  class BacktraceMap;
48  
49  namespace art {
50  
51  namespace gc {
52  namespace accounting {
53  template<class T> class AtomicStack;
54  }  // namespace accounting
55  namespace collector {
56  class SemiSpace;
57  }  // namespace collector
58  }  // namespace gc
59  
60  namespace instrumentation {
61  struct InstrumentationStackFrame;
62  }  // namespace instrumentation
63  
64  namespace mirror {
65  class Array;
66  class Class;
67  class ClassLoader;
68  class Object;
69  template<class T> class ObjectArray;
70  template<class T> class PrimitiveArray;
71  typedef PrimitiveArray<int32_t> IntArray;
72  class StackTraceElement;
73  class String;
74  class Throwable;
75  }  // namespace mirror
76  
77  namespace verifier {
78  class MethodVerifier;
79  class VerifierDeps;
80  }  // namespace verifier
81  
82  class ArtMethod;
83  class BaseMutex;
84  class ClassLinker;
85  class Closure;
86  class Context;
87  struct DebugInvokeReq;
88  class DeoptimizationContextRecord;
89  class DexFile;
90  class FrameIdToShadowFrame;
91  class JavaVMExt;
92  class JNIEnvExt;
93  class Monitor;
94  class RootVisitor;
95  class ScopedObjectAccessAlreadyRunnable;
96  class ShadowFrame;
97  class SingleStepControl;
98  class StackedShadowFrameRecord;
99  class Thread;
100  class ThreadList;
101  enum VisitRootFlags : uint8_t;
102  
103  // A piece of data that can be held in the CustomTls. The destructor will be called during thread
104  // shutdown. The thread the destructor is called on is not necessarily the same thread it was stored
105  // on.
106  class TLSData {
107   public:
108    virtual ~TLSData() {}
109  };
110  
111  // Thread priorities. These must match the Thread.MIN_PRIORITY,
112  // Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
113  enum ThreadPriority {
114    kMinThreadPriority = 1,
115    kNormThreadPriority = 5,
116    kMaxThreadPriority = 10,
117  };
118  
119  enum ThreadFlag {
120    kSuspendRequest   = 1,  // If set implies that suspend_count_ > 0 and the Thread should enter the
121                            // safepoint handler.
122    kCheckpointRequest = 2,  // Request that the thread do some checkpoint work and then continue.
123    kEmptyCheckpointRequest = 4,  // Request that the thread do an empty checkpoint and then continue.
124    kActiveSuspendBarrier = 8,  // Register that at least 1 suspend barrier needs to be passed.
125  };
126  
127  enum class StackedShadowFrameType {
128    kShadowFrameUnderConstruction,
129    kDeoptimizationShadowFrame,
130  };
131  
132  // The type of method that triggers deoptimization. It contains info on whether
133  // the deoptimized method should advance dex_pc.
134  enum class DeoptimizationMethodType {
135    kKeepDexPc,  // dex pc is required to be kept upon deoptimization.
136    kDefault     // dex pc may or may not advance depending on other conditions.
137  };
138  
139  // This should match RosAlloc::kNumThreadLocalSizeBrackets.
140  static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
141  
142  // Thread's stack layout for implicit stack overflow checks:
143  //
144  //   +---------------------+  <- highest address of stack memory
145  //   |                     |
146  //   .                     .  <- SP
147  //   |                     |
148  //   |                     |
149  //   +---------------------+  <- stack_end
150  //   |                     |
151  //   |  Gap                |
152  //   |                     |
153  //   +---------------------+  <- stack_begin
154  //   |                     |
155  //   | Protected region    |
156  //   |                     |
157  //   +---------------------+  <- lowest address of stack memory
158  //
159  // The stack always grows down in memory.  At the lowest address is a region of memory
160  // that is set mprotect(PROT_NONE).  Any attempt to read/write to this region will
161  // result in a segmentation fault signal.  At any point, the thread's SP will be somewhere
162  // between the stack_end and the highest address in stack memory.  An implicit stack
163  // overflow check is a read of memory at a certain offset below the current SP (4K typically).
164  // If the thread's SP is below the stack_end address this will be a read into the protected
165  // region.  If the SP is above the stack_end address, the thread is guaranteed to have
166  // at least 4K of space.  Because stack overflow checks are only performed in generated code,
167  // if the thread makes a call out to a native function (through JNI), that native function
168  // might only have 4K of memory (if the SP is adjacent to stack_end).
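//
// Editorial sketch (not from the original source): the implicit check described above is just a
// load issued by compiled code at a fixed negative offset from SP, e.g. roughly:
//
//   ldr wzr, [sp, #-0x1000]   // hypothetical 4K probe; faults if it lands in the protected region
//
// The runtime's fault handler recognizes such a faulting access and raises a StackOverflowError
// instead of letting the SIGSEGV crash the process.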
169  
170  class Thread {
171   public:
172    static const size_t kStackOverflowImplicitCheckSize;
173    static constexpr bool kVerifyStack = kIsDebugBuild;
174  
175    // Creates a new native thread corresponding to the given managed peer.
176    // Used to implement Thread.start.
177    static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
178  
179    // Attaches the calling native thread to the runtime, returning the new native peer.
180    // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
181    static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
182                          bool create_peer);
183    // Attaches the calling native thread to the runtime, returning the new native peer.
184    static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_peer);
185  
186    // Reset internal state of child thread after fork.
187    void InitAfterFork();
188  
189    // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
190    // high cost and so we favor passing self around when possible.
191    // TODO: mark as PURE so the compiler may coalesce and remove?
192    static Thread* Current();
193  
194    // On a runnable thread, check for pending thread suspension request and handle if pending.
195    void AllowThreadSuspension() REQUIRES_SHARED(Locks::mutator_lock_);
196  
197    // Process pending thread suspension request and handle if pending.
198    void CheckSuspend() REQUIRES_SHARED(Locks::mutator_lock_);
199  
200    // Process a pending empty checkpoint if pending.
201    void CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex);
202    void CheckEmptyCheckpointFromMutex();
203  
204    static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
205                                     ObjPtr<mirror::Object> thread_peer)
206        REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
207        REQUIRES_SHARED(Locks::mutator_lock_);
208    static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
209        REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
210        REQUIRES_SHARED(Locks::mutator_lock_);
211  
212    // Translates 172 to pAllocArrayFromCode and so on.
213    template<PointerSize size_of_pointers>
214    static void DumpThreadOffset(std::ostream& os, uint32_t offset);
215  
216    // Dumps a one-line summary of thread state (used for operator<<).
217    void ShortDump(std::ostream& os) const;
218  
219    // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
220    void Dump(std::ostream& os,
221              bool dump_native_stack = true,
222              BacktraceMap* backtrace_map = nullptr,
223              bool force_dump_stack = false) const
224        REQUIRES_SHARED(Locks::mutator_lock_);
225  
226    void DumpJavaStack(std::ostream& os,
227                       bool check_suspended = true,
228                       bool dump_locks = true) const
229        REQUIRES_SHARED(Locks::mutator_lock_);
230  
231    // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
232    // case we use 'tid' to identify the thread, and we'll include as much information as we can.
233    static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
234        REQUIRES_SHARED(Locks::mutator_lock_);
235  
236    ThreadState GetState() const {
237      DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
238      DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
239      return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
240    }
241  
242    ThreadState SetState(ThreadState new_state);
243  
244    int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
245      return tls32_.suspend_count;
246    }
247  
248    int GetUserCodeSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_,
249                                                 Locks::user_code_suspension_lock_) {
250      return tls32_.user_code_suspend_count;
251    }
252  
253    int GetDebugSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
254      return tls32_.debug_suspend_count;
255    }
256  
257    bool IsSuspended() const {
258      union StateAndFlags state_and_flags;
259      state_and_flags.as_int = tls32_.state_and_flags.as_int;
260      return state_and_flags.as_struct.state != kRunnable &&
261          (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
262    }
263  
264    // If delta > 0 and (this != self or suspend_barrier is not null), this function may temporarily
265    // release thread_suspend_count_lock_ internally.
266    ALWAYS_INLINE
267    bool ModifySuspendCount(Thread* self,
268                            int delta,
269                            AtomicInteger* suspend_barrier,
270                            SuspendReason reason)
271        WARN_UNUSED
272        REQUIRES(Locks::thread_suspend_count_lock_);
273  
274    // Requests a checkpoint closure to run on another thread. The closure will be run when the thread
275    // gets suspended. This will return true if the closure was added and will (eventually) be
276    // executed. It returns false otherwise.
277    //
278    // Since multiple closures can be queued and some closures can delay other threads from running,
279    // no closure should attempt to suspend another thread while running.
280    // TODO We should add some debug option that verifies this.
281    bool RequestCheckpoint(Closure* function)
282        REQUIRES(Locks::thread_suspend_count_lock_);
283  
284    // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This is
285    // due to the fact that Thread::Current() needs to go to sleep to allow the targeted thread to
286    // execute the checkpoint for us if it is Runnable. The suspend_state is the state that the thread
287    // will go into while it is awaiting the checkpoint to be run.
288    // NB Passing ThreadState::kRunnable may cause the current thread to wait in a condition variable
289    // while holding the mutator_lock_.  Callers should ensure that this will not cause any problems
290    // for the closure or the rest of the system.
291    // NB Since multiple closures can be queued and some closures can delay other threads from running,
292    // no closure should attempt to suspend another thread while running.
293    bool RequestSynchronousCheckpoint(Closure* function,
294                                      ThreadState suspend_state = ThreadState::kWaiting)
295        REQUIRES_SHARED(Locks::mutator_lock_)
296        RELEASE(Locks::thread_list_lock_)
297        REQUIRES(!Locks::thread_suspend_count_lock_);
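  // Editorial example (hedged; 'DumpClosure' is a hypothetical name, not part of this header):
  // a checkpoint is simply a Closure whose Run(Thread*) is executed for the target thread, e.g.
  //
  //   class DumpClosure : public Closure {
  //     void Run(Thread* thread) override {
  //       std::ostringstream oss;
  //       thread->ShortDump(oss);   // safe here: 'thread' is suspended or at a checkpoint
  //     }
  //   };
  //
  // and is handed to RequestCheckpoint()/RequestSynchronousCheckpoint() under the required locks.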
298  
299    bool RequestEmptyCheckpoint()
300        REQUIRES(Locks::thread_suspend_count_lock_);
301  
302    void SetFlipFunction(Closure* function);
303    Closure* GetFlipFunction();
304  
305    gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
306      CHECK(kUseReadBarrier);
307      return tlsPtr_.thread_local_mark_stack;
308    }
309    void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
310      CHECK(kUseReadBarrier);
311      tlsPtr_.thread_local_mark_stack = stack;
312    }
313  
314    // Called when the thread detects that thread_suspend_count_ is non-zero. Gives up its share of
315    // the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
316    void FullSuspendCheck()
317        REQUIRES(!Locks::thread_suspend_count_lock_)
318        REQUIRES_SHARED(Locks::mutator_lock_);
319  
320    // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
321    ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable()
322        REQUIRES(!Locks::thread_suspend_count_lock_)
323        SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
324  
325    // Transition from runnable into a state where mutator privileges are denied. Releases share of
326    // mutator lock.
327    ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
328        REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
329        UNLOCK_FUNCTION(Locks::mutator_lock_);
330  
331    // Once called thread suspension will cause an assertion failure.
332    const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
333      Roles::uninterruptible_.Acquire();  // No-op.
334      if (kIsDebugBuild) {
335        CHECK(cause != nullptr);
336        const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
337        tls32_.no_thread_suspension++;
338        tlsPtr_.last_no_thread_suspension_cause = cause;
339        return previous_cause;
340      } else {
341        return nullptr;
342      }
343    }
344  
345    // End region where no thread suspension is expected.
346    void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) {
347      if (kIsDebugBuild) {
348        CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
349        CHECK_GT(tls32_.no_thread_suspension, 0U);
350        tls32_.no_thread_suspension--;
351        tlsPtr_.last_no_thread_suspension_cause = old_cause;
352      }
353      Roles::uninterruptible_.Release();  // No-op.
354    }
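  // Editorial usage sketch (assumed; typically wrapped in a scoped helper):
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Visiting thread roots");
  //   ...                                   // region that must not suspend
  //   self->EndAssertNoThreadSuspension(old_cause);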
355  
356    void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
357  
358    // Return true if thread suspension is allowable.
359    bool IsThreadSuspensionAllowable() const;
360  
361    bool IsDaemon() const {
362      return tls32_.daemon;
363    }
364  
365    size_t NumberOfHeldMutexes() const;
366  
367    bool HoldsLock(ObjPtr<mirror::Object> object) const REQUIRES_SHARED(Locks::mutator_lock_);
368  
369    /*
370     * Changes the priority of this thread to match that of the java.lang.Thread object.
371     *
372     * We map a priority value from 1-10 to Linux "nice" values, where lower
373     * numbers indicate higher priority.
374     */
375    void SetNativePriority(int newPriority);
376  
377    /*
378     * Returns the thread priority for the current thread by querying the system.
379     * This is useful when attaching a thread through JNI.
380     *
381     * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
382     */
383    static int GetNativePriority();
384  
385    // Guaranteed to be non-zero.
386    uint32_t GetThreadId() const {
387      return tls32_.thin_lock_thread_id;
388    }
389  
390    pid_t GetTid() const {
391      return tls32_.tid;
392    }
393  
394    // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
395    ObjPtr<mirror::String> GetThreadName() const REQUIRES_SHARED(Locks::mutator_lock_);
396  
397    // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
398    // allocation, or locking.
399    void GetThreadName(std::string& name) const;
400  
401    // Sets the thread's name.
402    void SetThreadName(const char* name) REQUIRES_SHARED(Locks::mutator_lock_);
403  
404    // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
405    uint64_t GetCpuMicroTime() const;
406  
407    mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
408      DCHECK(Thread::Current() == this) << "Use GetPeerFromOtherThread instead";
409      CHECK(tlsPtr_.jpeer == nullptr);
410      return tlsPtr_.opeer;
411    }
412    // GetPeer is not safe if called on another thread in the middle of the CC thread flip:
413    // the thread's stack may not have been flipped yet, so the peer may be a from-space (stale) ref.
414    // This function will explicitly mark/forward it.
415    mirror::Object* GetPeerFromOtherThread() const REQUIRES_SHARED(Locks::mutator_lock_);
416  
417    bool HasPeer() const {
418      return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
419    }
420  
421    RuntimeStats* GetStats() {
422      return &tls64_.stats;
423    }
424  
425    bool IsStillStarting() const;
426  
427    bool IsExceptionPending() const {
428      return tlsPtr_.exception != nullptr;
429    }
430  
431    bool IsAsyncExceptionPending() const {
432      return tlsPtr_.async_exception != nullptr;
433    }
434  
435    mirror::Throwable* GetException() const REQUIRES_SHARED(Locks::mutator_lock_) {
436      return tlsPtr_.exception;
437    }
438  
439    void AssertPendingException() const;
440    void AssertPendingOOMException() const REQUIRES_SHARED(Locks::mutator_lock_);
441    void AssertNoPendingException() const;
442    void AssertNoPendingExceptionForNewException(const char* msg) const;
443  
444    void SetException(ObjPtr<mirror::Throwable> new_exception) REQUIRES_SHARED(Locks::mutator_lock_);
445  
446    // Set an exception that is asynchronously thrown from a different thread. This will be checked
447    // periodically and might overwrite the current 'Exception'. This can only be called from a
448    // checkpoint.
449    //
450    // The caller should also make sure that the thread has been deoptimized so that the exception
451    // could be detected on back-edges.
452    void SetAsyncException(ObjPtr<mirror::Throwable> new_exception)
453        REQUIRES_SHARED(Locks::mutator_lock_);
454  
455    void ClearException() REQUIRES_SHARED(Locks::mutator_lock_) {
456      tlsPtr_.exception = nullptr;
457    }
458  
459    // Move the current async-exception to the main exception. This should be called when the current
460    // thread is ready to deal with any async exceptions. Returns true if there is an async exception
461    // that needs to be dealt with, false otherwise.
462    bool ObserveAsyncException() REQUIRES_SHARED(Locks::mutator_lock_);
463  
464    // Find catch block and perform long jump to appropriate exception handler.
465    NO_RETURN void QuickDeliverException() REQUIRES_SHARED(Locks::mutator_lock_);
466  
467    Context* GetLongJumpContext();
468    void ReleaseLongJumpContext(Context* context) {
469      if (tlsPtr_.long_jump_context != nullptr) {
470        ReleaseLongJumpContextInternal();
471      }
472      tlsPtr_.long_jump_context = context;
473    }
474  
475    // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
476    // abort the runtime iff abort_on_error is true.
477    ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
478                                bool check_suspended = true,
479                                bool abort_on_error = true) const
480        REQUIRES_SHARED(Locks::mutator_lock_);
481  
482    // Returns whether the given exception was thrown by the current Java method being executed
483    // (Note that this includes native Java methods).
484    bool IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const
485        REQUIRES_SHARED(Locks::mutator_lock_);
486  
487    void SetTopOfStack(ArtMethod** top_method) {
488      tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
489    }
490  
491    void SetTopOfStackTagged(ArtMethod** top_method) {
492      tlsPtr_.managed_stack.SetTopQuickFrameTagged(top_method);
493    }
494  
495    void SetTopOfShadowStack(ShadowFrame* top) {
496      tlsPtr_.managed_stack.SetTopShadowFrame(top);
497    }
498  
499    bool HasManagedStack() const {
500      return tlsPtr_.managed_stack.HasTopQuickFrame() || tlsPtr_.managed_stack.HasTopShadowFrame();
501    }
502  
503    // If 'msg' is null, no detail message is set.
504    void ThrowNewException(const char* exception_class_descriptor, const char* msg)
505        REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
506  
507    // If 'msg' is null, no detail message is set. An exception must be pending, and will be
508    // used as the new exception's cause.
509    void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
510        REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
511  
512    void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
513        __attribute__((format(printf, 3, 4)))
514        REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
515  
516    void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
517        REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
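  // Editorial example (hedged): descriptors are JNI-style class descriptors, e.g.
  //
  //   self->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;",
  //                            "length=%d is negative", length);  // 'length' is hypothetical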
518  
519    // OutOfMemoryError is special, because we need to pre-allocate an instance.
520    // Only the GC should call this.
521    void ThrowOutOfMemoryError(const char* msg) REQUIRES_SHARED(Locks::mutator_lock_)
522        REQUIRES(!Roles::uninterruptible_);
523  
524    static void Startup();
525    static void FinishStartup();
526    static void Shutdown();
527  
528    // Notify this thread's thread-group that this thread has started.
529    // Note: the given thread-group is used as a fast path and verified in debug build. If the value
530    //       is null, the thread's thread-group is loaded from the peer.
531    void NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group = nullptr)
532        REQUIRES_SHARED(Locks::mutator_lock_);
533  
534    // JNI methods
535    JNIEnvExt* GetJniEnv() const {
536      return tlsPtr_.jni_env;
537    }
538  
539    // Convert a jobject into an Object*.
540    ObjPtr<mirror::Object> DecodeJObject(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
541    // Checks if the weak global ref has been cleared by the GC without decoding it.
542    bool IsJWeakCleared(jweak obj) const REQUIRES_SHARED(Locks::mutator_lock_);
543  
544    mirror::Object* GetMonitorEnterObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
545      return tlsPtr_.monitor_enter_object;
546    }
547  
548    void SetMonitorEnterObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
549      tlsPtr_.monitor_enter_object = obj;
550    }
551  
552    // Implements java.lang.Thread.interrupted.
553    bool Interrupted();
554    // Implements java.lang.Thread.isInterrupted.
555    bool IsInterrupted();
556    void Interrupt(Thread* self) REQUIRES(!wait_mutex_);
557    void SetInterrupted(bool i) {
558      tls32_.interrupted.store(i, std::memory_order_seq_cst);
559    }
560    void Notify() REQUIRES(!wait_mutex_);
561  
562    ALWAYS_INLINE void PoisonObjectPointers() {
563      ++poison_object_cookie_;
564    }
565  
566    ALWAYS_INLINE static void PoisonObjectPointersIfDebug();
567  
568    ALWAYS_INLINE uintptr_t GetPoisonObjectCookie() const {
569      return poison_object_cookie_;
570    }
571  
572    // Parking for 0ns of relative time means an untimed park; a negative time (which
573    // should be handled in Java code) returns immediately.
574    void Park(bool is_absolute, int64_t time) REQUIRES_SHARED(Locks::mutator_lock_);
575    void Unpark();
576  
577   private:
578    void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
579  
580   public:
581    Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
582      return wait_mutex_;
583    }
584  
585    ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) {
586      return wait_cond_;
587    }
588  
589    Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) {
590      return wait_monitor_;
591    }
592  
593    void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) {
594      wait_monitor_ = mon;
595    }
596  
597    // Waiter linked-list support.
598    Thread* GetWaitNext() const {
599      return tlsPtr_.wait_next;
600    }
601  
602    void SetWaitNext(Thread* next) {
603      tlsPtr_.wait_next = next;
604    }
605  
606    jobject GetClassLoaderOverride() {
607      return tlsPtr_.class_loader_override;
608    }
609  
610    void SetClassLoaderOverride(jobject class_loader_override);
611  
612    // Create the internal representation of a stack trace, which is more time-
613    // and space-efficient to compute than the StackTraceElement[].
614    template<bool kTransactionActive>
615    jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
616        REQUIRES_SHARED(Locks::mutator_lock_);
617  
618    // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
619    // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
620    // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
621    // with the number of valid frames in the returned array.
622    static jobjectArray InternalStackTraceToStackTraceElementArray(
623        const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
624        jobjectArray output_array = nullptr, int* stack_depth = nullptr)
625        REQUIRES_SHARED(Locks::mutator_lock_);
626  
627    jobjectArray CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
628        REQUIRES_SHARED(Locks::mutator_lock_);
629  
630    bool HasDebuggerShadowFrames() const {
631      return tlsPtr_.frame_id_to_shadow_frame != nullptr;
632    }
633  
634    void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
635        REQUIRES_SHARED(Locks::mutator_lock_);
636  
637    void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_) {
638      if (kVerifyStack) {
639        VerifyStackImpl();
640      }
641    }
642  
643    //
644    // Offsets of various members of native Thread class, used by compiled code.
645    //
646  
647    template<PointerSize pointer_size>
648    static constexpr ThreadOffset<pointer_size> ThinLockIdOffset() {
649      return ThreadOffset<pointer_size>(
650          OFFSETOF_MEMBER(Thread, tls32_) +
651          OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
652    }
653  
654    template<PointerSize pointer_size>
655    static constexpr ThreadOffset<pointer_size> InterruptedOffset() {
656      return ThreadOffset<pointer_size>(
657          OFFSETOF_MEMBER(Thread, tls32_) +
658          OFFSETOF_MEMBER(tls_32bit_sized_values, interrupted));
659    }
660  
661    template<PointerSize pointer_size>
662    static constexpr ThreadOffset<pointer_size> ThreadFlagsOffset() {
663      return ThreadOffset<pointer_size>(
664          OFFSETOF_MEMBER(Thread, tls32_) +
665          OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
666    }
667  
668    template<PointerSize pointer_size>
669    static constexpr ThreadOffset<pointer_size> UseMterpOffset() {
670      return ThreadOffset<pointer_size>(
671          OFFSETOF_MEMBER(Thread, tls32_) +
672          OFFSETOF_MEMBER(tls_32bit_sized_values, use_mterp));
673    }
674  
675    template<PointerSize pointer_size>
676    static constexpr ThreadOffset<pointer_size> IsGcMarkingOffset() {
677      return ThreadOffset<pointer_size>(
678          OFFSETOF_MEMBER(Thread, tls32_) +
679          OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
680    }
681  
682    static constexpr size_t IsGcMarkingSize() {
683      return sizeof(tls32_.is_gc_marking);
684    }
685  
686    // Deoptimize the Java stack.
687    void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
688  
689   private:
690    template<PointerSize pointer_size>
691    static constexpr ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
692      size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
693      size_t scale = (pointer_size > kRuntimePointerSize) ?
694        static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize) : 1;
695      size_t shrink = (kRuntimePointerSize > pointer_size) ?
696        static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size) : 1;
697      return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
698    }
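  // Editorial worked example (hedged): when the requested pointer_size is k64 but the runtime is
  // 32-bit (kRuntimePointerSize == k32), scale == 2 and shrink == 1, so a field at tlsPtr_ offset
  // 8 maps to base + 16; when the sizes match, both factors are 1 and the offset is used as-is.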
699  
700   public:
701    static uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
702                                                  PointerSize pointer_size) {
703      if (pointer_size == PointerSize::k32) {
704        return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset).
705            Uint32Value();
706      } else {
707        return QuickEntryPointOffset<PointerSize::k64>(quick_entrypoint_offset).
708            Uint32Value();
709      }
710    }
711  
712    template<PointerSize pointer_size>
713    static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
714      return ThreadOffsetFromTlsPtr<pointer_size>(
715          OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
716    }
717  
718    template<PointerSize pointer_size>
719    static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
720      return ThreadOffsetFromTlsPtr<pointer_size>(
721          OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
722    }
723  
724    // Return the entry point offset integer value for ReadBarrierMarkRegX, where X is `reg`.
725    template <PointerSize pointer_size>
726    static int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) {
727      // The entry point list defines 30 ReadBarrierMarkRegX entry points.
728      DCHECK_LT(reg, 30u);
729      // The ReadBarrierMarkRegX entry points are ordered by increasing
730      // register number in Thread::tlsPtr_.quick_entrypoints.
731      return QUICK_ENTRYPOINT_OFFSET(pointer_size, pReadBarrierMarkReg00).Int32Value()
732          + static_cast<size_t>(pointer_size) * reg;
733    }
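  // Editorial worked example (hedged): with 64-bit pointers, reg == 5 yields the offset of
  // pReadBarrierMarkReg00 plus 8 * 5, i.e. the sixth entry of the table.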
734  
735    template<PointerSize pointer_size>
736    static constexpr ThreadOffset<pointer_size> SelfOffset() {
737      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
738    }
739  
740    template<PointerSize pointer_size>
741    static constexpr ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
742      return ThreadOffsetFromTlsPtr<pointer_size>(
743          OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase));
744    }
745  
746    template<PointerSize pointer_size>
747    static constexpr ThreadOffset<pointer_size> ExceptionOffset() {
748      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
749    }
750  
751    template<PointerSize pointer_size>
752    static constexpr ThreadOffset<pointer_size> PeerOffset() {
753      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
754    }
755  
756  
757    template<PointerSize pointer_size>
758    static constexpr ThreadOffset<pointer_size> CardTableOffset() {
759      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
760    }
761  
762    template<PointerSize pointer_size>
763    static constexpr ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
764      return ThreadOffsetFromTlsPtr<pointer_size>(
765          OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
766    }
767  
768    template<PointerSize pointer_size>
769    static constexpr ThreadOffset<pointer_size> ThreadLocalPosOffset() {
770      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
771                                                                  thread_local_pos));
772    }
773  
774    template<PointerSize pointer_size>
775    static constexpr ThreadOffset<pointer_size> ThreadLocalEndOffset() {
776      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
777                                                                  thread_local_end));
778    }
779  
780    template<PointerSize pointer_size>
781    static constexpr ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
782      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
783                                                                  thread_local_objects));
784    }
785  
786    template<PointerSize pointer_size>
787    static constexpr ThreadOffset<pointer_size> RosAllocRunsOffset() {
788      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
789                                                                  rosalloc_runs));
790    }
791  
792    template<PointerSize pointer_size>
793    static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
794      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
795                                                                  thread_local_alloc_stack_top));
796    }
797  
798    template<PointerSize pointer_size>
799    static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
800      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
801                                                                  thread_local_alloc_stack_end));
802    }
803  
804    // Size of stack less any space reserved for stack overflow
805    size_t GetStackSize() const {
806      return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
807    }
808  
809    ALWAYS_INLINE uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const;
810  
811    uint8_t* GetStackEnd() const {
812      return tlsPtr_.stack_end;
813    }
814  
815    // Set the stack end to the value to be used during a stack overflow.
816    void SetStackEndForStackOverflow() REQUIRES_SHARED(Locks::mutator_lock_);
817  
818    // Set the stack end to the value to be used during regular execution.
819    ALWAYS_INLINE void ResetDefaultStackEnd();
820  
821    bool IsHandlingStackOverflow() const {
822      return tlsPtr_.stack_end == tlsPtr_.stack_begin;
823    }
824  
825    template<PointerSize pointer_size>
826    static constexpr ThreadOffset<pointer_size> StackEndOffset() {
827      return ThreadOffsetFromTlsPtr<pointer_size>(
828          OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
829    }
830  
831    template<PointerSize pointer_size>
832    static constexpr ThreadOffset<pointer_size> JniEnvOffset() {
833      return ThreadOffsetFromTlsPtr<pointer_size>(
834          OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
835    }
836  
837    template<PointerSize pointer_size>
838    static constexpr ThreadOffset<pointer_size> TopOfManagedStackOffset() {
839      return ThreadOffsetFromTlsPtr<pointer_size>(
840          OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
841          ManagedStack::TaggedTopQuickFrameOffset());
842    }
843  
844    const ManagedStack* GetManagedStack() const {
845      return &tlsPtr_.managed_stack;
846    }
847  
848    // Linked list recording fragments of managed stack.
849    void PushManagedStackFragment(ManagedStack* fragment) {
850      tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
851    }
852    void PopManagedStackFragment(const ManagedStack& fragment) {
853      tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
854    }
855  
856    ALWAYS_INLINE ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame);
857    ALWAYS_INLINE ShadowFrame* PopShadowFrame();
858  
859    template<PointerSize pointer_size>
860    static constexpr ThreadOffset<pointer_size> TopShadowFrameOffset() {
861      return ThreadOffsetFromTlsPtr<pointer_size>(
862          OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
863          ManagedStack::TopShadowFrameOffset());
864    }
865  
866    // Is the given obj in this thread's stack indirect reference table?
867    bool HandleScopeContains(jobject obj) const;
868  
869    void HandleScopeVisitRoots(RootVisitor* visitor, pid_t thread_id)
870        REQUIRES_SHARED(Locks::mutator_lock_);
871  
872    BaseHandleScope* GetTopHandleScope() {
873      return tlsPtr_.top_handle_scope;
874    }
875  
876    void PushHandleScope(BaseHandleScope* handle_scope) {
877      DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
878      tlsPtr_.top_handle_scope = handle_scope;
879    }
880  
881    BaseHandleScope* PopHandleScope() {
882      BaseHandleScope* handle_scope = tlsPtr_.top_handle_scope;
883      DCHECK(handle_scope != nullptr);
884      tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
885      return handle_scope;
886    }
887  
888    template<PointerSize pointer_size>
889    static constexpr ThreadOffset<pointer_size> TopHandleScopeOffset() {
890      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
891                                                                  top_handle_scope));
892    }
893  
894    DebugInvokeReq* GetInvokeReq() const {
895      return tlsPtr_.debug_invoke_req;
896    }
897  
898    SingleStepControl* GetSingleStepControl() const {
899      return tlsPtr_.single_step_control;
900    }
901  
902    // Indicates whether this thread is ready to invoke a method for debugging. This
903    // is only true if the thread has been suspended by a debug event.
904    bool IsReadyForDebugInvoke() const {
905      return tls32_.ready_for_debug_invoke;
906    }
907  
908    void SetReadyForDebugInvoke(bool ready) {
909      tls32_.ready_for_debug_invoke = ready;
910    }
911  
912    bool IsDebugMethodEntry() const {
913      return tls32_.debug_method_entry_;
914    }
915  
916    void SetDebugMethodEntry() {
917      tls32_.debug_method_entry_ = true;
918    }
919  
920    void ClearDebugMethodEntry() {
921      tls32_.debug_method_entry_ = false;
922    }
923  
924    bool GetIsGcMarking() const {
925      CHECK(kUseReadBarrier);
926      return tls32_.is_gc_marking;
927    }
928  
929    void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking);
930  
931    bool GetWeakRefAccessEnabled() const {
932      CHECK(kUseReadBarrier);
933      return tls32_.weak_ref_access_enabled;
934    }
935  
936    void SetWeakRefAccessEnabled(bool enabled) {
937      CHECK(kUseReadBarrier);
938      tls32_.weak_ref_access_enabled = enabled;
939    }
940  
941    uint32_t GetDisableThreadFlipCount() const {
942      CHECK(kUseReadBarrier);
943      return tls32_.disable_thread_flip_count;
944    }
945  
946    void IncrementDisableThreadFlipCount() {
947      CHECK(kUseReadBarrier);
948      ++tls32_.disable_thread_flip_count;
949    }
950  
951    void DecrementDisableThreadFlipCount() {
952      CHECK(kUseReadBarrier);
953      DCHECK_GT(tls32_.disable_thread_flip_count, 0U);
954      --tls32_.disable_thread_flip_count;
955    }
956  
957    // Returns true if the thread is a runtime thread (e.g. from a ThreadPool).
958    bool IsRuntimeThread() const {
959      return is_runtime_thread_;
960    }
961  
962    void SetIsRuntimeThread(bool is_runtime_thread) {
963      is_runtime_thread_ = is_runtime_thread;
964    }
965  
966    // Returns true if the thread is allowed to load java classes.
967    bool CanLoadClasses() const;
968  
969    // Activates single step control for debugging. The thread takes
970    // ownership of the given SingleStepControl*. It is deleted by a call
971    // to DeactivateSingleStepControl or upon thread destruction.
972    void ActivateSingleStepControl(SingleStepControl* ssc);
973  
974    // Deactivates single step control for debugging.
975    void DeactivateSingleStepControl();
976  
977    // Sets debug invoke request for debugging. When the thread is resumed,
978    // it executes the method described by this request, then sends the reply
979    // before suspending itself. The thread takes ownership of the given
980    // DebugInvokeReq*. It is deleted by a call to ClearDebugInvokeReq.
981    void SetDebugInvokeReq(DebugInvokeReq* req);
982  
983    // Clears debug invoke request for debugging. When the thread completes
984    // method invocation, it deletes its debug invoke request and suspends
985    // itself.
986    void ClearDebugInvokeReq();
987  
988    // Returns the fake exception used to activate deoptimization.
989    static mirror::Throwable* GetDeoptimizationException() {
990      // Note that the mirror::Throwable must be aligned to kObjectAlignment or else it cannot be
991      // represented by ObjPtr.
992      return reinterpret_cast<mirror::Throwable*>(0x100);
993    }
994  
995    // Currently deoptimization invokes the verifier, which can trigger class loading
996    // and execute Java code, so there might be nested deoptimizations happening.
997    // We need to save the ongoing deoptimization shadow frames and return
998    // values on stacks.
999    // 'from_code' denotes whether the deoptimization was explicitly made from
1000    // compiled code.
1001    // 'method_type' contains info on whether deoptimization should advance
1002    // dex_pc.
1003    void PushDeoptimizationContext(const JValue& return_value,
1004                                   bool is_reference,
1005                                   ObjPtr<mirror::Throwable> exception,
1006                                   bool from_code,
1007                                   DeoptimizationMethodType method_type)
1008        REQUIRES_SHARED(Locks::mutator_lock_);
1009    void PopDeoptimizationContext(JValue* result,
1010                                  ObjPtr<mirror::Throwable>* exception,
1011                                  bool* from_code,
1012                                  DeoptimizationMethodType* method_type)
1013        REQUIRES_SHARED(Locks::mutator_lock_);
1014    void AssertHasDeoptimizationContext()
1015        REQUIRES_SHARED(Locks::mutator_lock_);
1016    void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
1017    ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present = true);
1018  
1019    // For debugger, find the shadow frame that corresponds to a frame id.
1020    // Or return null if there is none.
1021    ShadowFrame* FindDebuggerShadowFrame(size_t frame_id)
1022        REQUIRES_SHARED(Locks::mutator_lock_);
1023    // For debugger, find the bool array that keeps track of the updated vreg set
1024    // for a frame id.
1025    bool* GetUpdatedVRegFlags(size_t frame_id) REQUIRES_SHARED(Locks::mutator_lock_);
1026    // For debugger, find the shadow frame that corresponds to a frame id. If
1027    // one doesn't exist yet, create one and track it in frame_id_to_shadow_frame.
1028    ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id,
1029                                                 uint32_t num_vregs,
1030                                                 ArtMethod* method,
1031                                                 uint32_t dex_pc)
1032        REQUIRES_SHARED(Locks::mutator_lock_);
1033  
1034    // Delete the entry that maps from frame_id to shadow_frame.
1035    void RemoveDebuggerShadowFrameMapping(size_t frame_id)
1036        REQUIRES_SHARED(Locks::mutator_lock_);
1037  
1038    std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
1039      return tlsPtr_.instrumentation_stack;
1040    }
1041  
1042    std::vector<ArtMethod*>* GetStackTraceSample() const {
1043      DCHECK(!IsAotCompiler());
1044      return tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
1045    }
1046  
1047    void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
1048      DCHECK(!IsAotCompiler());
1049      tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample = sample;
1050    }
1051  
1052    verifier::VerifierDeps* GetVerifierDeps() const {
1053      DCHECK(IsAotCompiler());
1054      return tlsPtr_.deps_or_stack_trace_sample.verifier_deps;
1055    }
1056  
1057    // It is the responsibility of the caller to make sure the verifier_deps
1058    // entry in the thread is cleared before destruction of the actual VerifierDeps
1059    // object, or the thread.
1060    void SetVerifierDeps(verifier::VerifierDeps* verifier_deps) {
1061      DCHECK(IsAotCompiler());
1062      DCHECK(verifier_deps == nullptr || tlsPtr_.deps_or_stack_trace_sample.verifier_deps == nullptr);
1063      tlsPtr_.deps_or_stack_trace_sample.verifier_deps = verifier_deps;
1064    }
1065  
1066    uint64_t GetTraceClockBase() const {
1067      return tls64_.trace_clock_base;
1068    }
1069  
1070    void SetTraceClockBase(uint64_t clock_base) {
1071      tls64_.trace_clock_base = clock_base;
1072    }
1073  
1074    BaseMutex* GetHeldMutex(LockLevel level) const {
1075      return tlsPtr_.held_mutexes[level];
1076    }
1077  
1078    void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
1079      tlsPtr_.held_mutexes[level] = mutex;
1080    }
1081  
1082    void ClearSuspendBarrier(AtomicInteger* target)
1083        REQUIRES(Locks::thread_suspend_count_lock_);
1084  
1085    bool ReadFlag(ThreadFlag flag) const {
1086      return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
1087    }
1088  
1089    bool TestAllFlags() const {
1090      return (tls32_.state_and_flags.as_struct.flags != 0);
1091    }
1092  
1093    void AtomicSetFlag(ThreadFlag flag) {
1094      tls32_.state_and_flags.as_atomic_int.fetch_or(flag, std::memory_order_seq_cst);
1095    }
1096  
1097    void AtomicClearFlag(ThreadFlag flag) {
1098      tls32_.state_and_flags.as_atomic_int.fetch_and(-1 ^ flag, std::memory_order_seq_cst);
1099    }
1100  
1101    bool UseMterp() const {
1102      return tls32_.use_mterp.load();
1103    }
1104  
1105    void ResetQuickAllocEntryPointsForThread(bool is_marking);
1106  
1107    // Returns the remaining space in the TLAB.
1108    size_t TlabSize() const {
1109      return tlsPtr_.thread_local_end - tlsPtr_.thread_local_pos;
1110    }
1111  
1112    // Returns the remaining space in the TLAB if we were to expand it to maximum capacity.
1113    size_t TlabRemainingCapacity() const {
1114      return tlsPtr_.thread_local_limit - tlsPtr_.thread_local_pos;
1115    }
1116  
1117    // Expand the TLAB by a fixed number of bytes. There must be enough capacity to do so.
1118    void ExpandTlab(size_t bytes) {
1119      tlsPtr_.thread_local_end += bytes;
1120      DCHECK_LE(tlsPtr_.thread_local_end, tlsPtr_.thread_local_limit);
1121    }
1122  
1123    // Doesn't check that there is room.
1124    mirror::Object* AllocTlab(size_t bytes);
1125    void SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit);
1126    bool HasTlab() const;
1127    uint8_t* GetTlabStart() {
1128      return tlsPtr_.thread_local_start;
1129    }
1130    uint8_t* GetTlabPos() {
1131      return tlsPtr_.thread_local_pos;
1132    }
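  // Editorial sketch of the intended bump-pointer fast path (assumed, caller-side):
  //
  //   if (self->TlabSize() >= byte_count) {
  //     mirror::Object* obj = self->AllocTlab(byte_count);  // bumps thread_local_pos; no room check
  //     ...
  //   }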
1133  
1134    // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
1135    // equal to a valid pointer.
1136    // TODO: does this need to be atomic?  I don't think so.
1137    void RemoveSuspendTrigger() {
1138      tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
1139    }
1140  
1141    // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
1142    // The next time a suspend check is done, it will load from the value at this address
1143    // and trigger a SIGSEGV.
1144    void TriggerSuspend() {
1145      tlsPtr_.suspend_trigger = nullptr;
1146    }
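  // Editorial note (hedged): the implicit suspend check in compiled code loads the pointer stored
  // in tlsPtr_.suspend_trigger and then dereferences it; after TriggerSuspend() that dereference
  // of null faults, and the fault handler routes the thread into a suspend check.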
1147  
1148  
1149    // Push an object onto the allocation stack.
1150    bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
1151        REQUIRES_SHARED(Locks::mutator_lock_);
1152  
1153    // Set the thread local allocation pointers to the given pointers.
1154    void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
1155                                       StackReference<mirror::Object>* end);
1156  
1157    // Resets the thread local allocation pointers.
1158    void RevokeThreadLocalAllocationStack();
1159  
1160    size_t GetThreadLocalBytesAllocated() const {
1161      return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
1162    }
1163  
1164    size_t GetThreadLocalObjectsAllocated() const {
1165      return tlsPtr_.thread_local_objects;
1166    }
1167  
1168    void* GetRosAllocRun(size_t index) const {
1169      return tlsPtr_.rosalloc_runs[index];
1170    }
1171  
1172    void SetRosAllocRun(size_t index, void* run) {
1173      tlsPtr_.rosalloc_runs[index] = run;
1174    }
1175  
1176    bool ProtectStack(bool fatal_on_error = true);
1177    bool UnprotectStack();
1178  
1179    void SetMterpCurrentIBase(void* ibase) {
1180      tlsPtr_.mterp_current_ibase = ibase;
1181    }
1182  
1183    const void* GetMterpCurrentIBase() const {
1184      return tlsPtr_.mterp_current_ibase;
1185    }
1186  
1187    bool HandlingSignal() const {
1188      return tls32_.handling_signal_;
1189    }
1190  
1191    void SetHandlingSignal(bool handling_signal) {
1192      tls32_.handling_signal_ = handling_signal;
1193    }
1194  
1195    bool IsTransitioningToRunnable() const {
1196      return tls32_.is_transitioning_to_runnable;
1197    }
1198  
1199    void SetIsTransitioningToRunnable(bool value) {
1200      tls32_.is_transitioning_to_runnable = value;
1201    }
1202  
1203    uint32_t DecrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
1204      return --tls32_.force_interpreter_count;
1205    }
1206  
1207    uint32_t IncrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
1208      return ++tls32_.force_interpreter_count;
1209    }
1210  
1211    void SetForceInterpreterCount(uint32_t value) REQUIRES(Locks::thread_list_lock_) {
1212      tls32_.force_interpreter_count = value;
1213    }
1214  
1215    uint32_t ForceInterpreterCount() const {
1216      return tls32_.force_interpreter_count;
1217    }
1218  
1219    bool IsForceInterpreter() const {
1220      return tls32_.force_interpreter_count != 0;
1221    }
1222  
1223    void PushVerifier(verifier::MethodVerifier* verifier);
1224    void PopVerifier(verifier::MethodVerifier* verifier);
1225  
1226    void InitStringEntryPoints();
1227  
1228    void ModifyDebugDisallowReadBarrier(int8_t delta) {
1229      debug_disallow_read_barrier_ += delta;
1230    }
1231  
1232    uint8_t GetDebugDisallowReadBarrierCount() const {
1233      return debug_disallow_read_barrier_;
1234    }
1235  
1236    // Gets the current TLSData associated with the key or nullptr if there isn't any. Note that users
1237    // do not gain ownership of TLSData and must synchronize with SetCustomTLS themselves to prevent
1238    // it from being deleted.
1239    TLSData* GetCustomTLS(const char* key) REQUIRES(!Locks::custom_tls_lock_);
1240  
1241    // Sets the tls entry at 'key' to data. The thread takes ownership of the TLSData. The destructor
1242    // will be run when the thread exits or when SetCustomTLS is called again with the same key.
1243    void SetCustomTLS(const char* key, TLSData* data) REQUIRES(!Locks::custom_tls_lock_);
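  // Editorial usage sketch ("my-key" and MyTlsData are hypothetical):
  //
  //   class MyTlsData : public TLSData { /* destructor runs on thread shutdown */ };
  //   self->SetCustomTLS("my-key", new MyTlsData());   // thread takes ownership
  //   TLSData* data = self->GetCustomTLS("my-key");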
1244  
1245    // Returns true if the current thread is the jit sensitive thread.
1246    bool IsJitSensitiveThread() const {
1247      return this == jit_sensitive_thread_;
1248    }
1249  
1250    bool IsSystemDaemon() const REQUIRES_SHARED(Locks::mutator_lock_);
1251  
1252    // Returns true if StrictMode events are traced for the current thread.
1253    static bool IsSensitiveThread() {
1254      if (is_sensitive_thread_hook_ != nullptr) {
1255        return (*is_sensitive_thread_hook_)();
1256      }
1257      return false;
1258    }
1259  
1260    // Sets the read barrier marking entrypoints to be non-null.
1261    void SetReadBarrierEntrypoints();
1262  
1263    static jobject CreateCompileTimePeer(JNIEnv* env,
1264                                         const char* name,
1265                                         bool as_daemon,
1266                                         jobject thread_group)
1267        REQUIRES_SHARED(Locks::mutator_lock_);
1268  
1269    ALWAYS_INLINE InterpreterCache* GetInterpreterCache() {
1270      return &interpreter_cache_;
1271    }
1272  
1273    // Clear all thread-local interpreter caches.
1274    //
1275    // Since the caches are keyed by memory pointer to dex instructions, this must be
1276    // called when any dex code is unloaded (before different code gets loaded at the
1277    // same memory location).
1278    //
1279    // If presence of cache entry implies some pre-conditions, this must also be
1280    // called if the pre-conditions might no longer hold true.
1281    static void ClearAllInterpreterCaches();
1282  
1283    template<PointerSize pointer_size>
1284    static constexpr ThreadOffset<pointer_size> InterpreterCacheOffset() {
1285      return ThreadOffset<pointer_size>(OFFSETOF_MEMBER(Thread, interpreter_cache_));
1286    }
1287  
1288    static constexpr int InterpreterCacheSizeLog2() {
1289      return WhichPowerOf2(InterpreterCache::kSize);
1290    }
1291  
1292   private:
1293    explicit Thread(bool daemon);
1294    ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
1295    void Destroy();
1296  
1297    void NotifyInTheadList()
1298        REQUIRES_SHARED(Locks::thread_list_lock_);
1299  
1300    // Attaches the calling native thread to the runtime, returning the new native peer.
1301    // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
1302    template <typename PeerAction>
1303    static Thread* Attach(const char* thread_name,
1304                          bool as_daemon,
1305                          PeerAction p);
1306  
1307    void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
1308  
1309    template<bool kTransactionActive>
1310    static void InitPeer(ScopedObjectAccessAlreadyRunnable& soa,
1311                         ObjPtr<mirror::Object> peer,
1312                         jboolean thread_is_daemon,
1313                         jobject thread_group,
1314                         jobject thread_name,
1315                         jint thread_priority)
1316        REQUIRES_SHARED(Locks::mutator_lock_);
1317  
1318    // Avoid use; callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
1319    // Dbg::ManageDeoptimization.
1320    ThreadState SetStateUnsafe(ThreadState new_state) {
1321      ThreadState old_state = GetState();
1322      if (old_state == kRunnable && new_state != kRunnable) {
1323        // Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in
1324        // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
1325        // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
1326        TransitionToSuspendedAndRunCheckpoints(new_state);
1327        // Since we transitioned to a suspended state, check for pending suspend barriers to pass.
1328        PassActiveSuspendBarriers();
1329      } else {
1330        tls32_.state_and_flags.as_struct.state = new_state;
1331      }
1332      return old_state;
1333    }
1334  
1335    void VerifyStackImpl() REQUIRES_SHARED(Locks::mutator_lock_);
1336  
1337    void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
1338    void DumpStack(std::ostream& os,
1339                   bool dump_native_stack = true,
1340                   BacktraceMap* backtrace_map = nullptr,
1341                   bool force_dump_stack = false) const
1342        REQUIRES_SHARED(Locks::mutator_lock_);
1343  
1344    // Out-of-line conveniences for debugging in gdb.
1345    static Thread* CurrentFromGdb();  // Like Thread::Current.
1346    // Like Thread::Dump(std::cerr).
1347    void DumpFromGdb() const REQUIRES_SHARED(Locks::mutator_lock_);
1348  
1349    static void* CreateCallback(void* arg);
1350  
1351    void HandleUncaughtExceptions(ScopedObjectAccessAlreadyRunnable& soa)
1352        REQUIRES_SHARED(Locks::mutator_lock_);
1353    void RemoveFromThreadGroup(ScopedObjectAccessAlreadyRunnable& soa)
1354        REQUIRES_SHARED(Locks::mutator_lock_);
1355  
1356    // Initialize a thread.
1357    //
1358    // The third parameter is not mandatory. If given, the thread will use this JNIEnvExt. In case
1359    // Init succeeds, this means the thread takes ownership of it. If Init fails, it is the caller's
1360    // responsibility to destroy the given JNIEnvExt. If the parameter is null, Init will try to
1361    // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value
1362    // of false).
1363    bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
1364        REQUIRES(Locks::runtime_shutdown_lock_);
1365    void InitCardTable();
1366    void InitCpu();
1367    void CleanupCpu();
1368    void InitTlsEntryPoints();
1369    void InitTid();
1370    void InitPthreadKeySelf();
1371    bool InitStackHwm();
1372  
1373    void SetUpAlternateSignalStack();
1374    void TearDownAlternateSignalStack();
1375  
1376    ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state)
1377        REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1378  
1379    ALWAYS_INLINE void PassActiveSuspendBarriers()
1380        REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1381  
1382    // Registers the current thread as the jit sensitive thread. Should be called just once.
1383    static void SetJitSensitiveThread() {
1384      if (jit_sensitive_thread_ == nullptr) {
1385        jit_sensitive_thread_ = Thread::Current();
1386      } else {
1387        LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:"
1388            << Thread::Current()->GetTid();
1389      }
1390    }
1391  
1392    static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
1393      is_sensitive_thread_hook_ = is_sensitive_thread_hook;
1394    }
1395  
1396    bool ModifySuspendCountInternal(Thread* self,
1397                                    int delta,
1398                                    AtomicInteger* suspend_barrier,
1399                                    SuspendReason reason)
1400        WARN_UNUSED
1401        REQUIRES(Locks::thread_suspend_count_lock_);
1402  
1403    // Runs a single checkpoint function. If there are no more pending checkpoint functions it will
1404    // clear the kCheckpointRequest flag. The caller is responsible for calling this in a loop until
1405    // the kCheckpointRequest flag is cleared.
1406    void RunCheckpointFunction();
1407    void RunEmptyCheckpoint();
1408  
1409    bool PassActiveSuspendBarriers(Thread* self)
1410        REQUIRES(!Locks::thread_suspend_count_lock_);
1411  
1412    // Install the protected region for implicit stack checks.
1413    void InstallImplicitProtection();
1414  
1415    template <bool kPrecise>
1416    void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
1417  
1418    static bool IsAotCompiler();
1419  
1420    void ReleaseLongJumpContextInternal();
1421  
1422    // 32 bits of atomically changed state and flags. Keeping as 32 bits allows an atomic CAS to
1423    // change from being Suspended to Runnable without a suspend request occurring.
1424    union PACKED(4) StateAndFlags {
1425      StateAndFlags() {}
1426      struct PACKED(4) {
1427        // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
1428        // ThreadFlags for bit field meanings.
1429        volatile uint16_t flags;
1430        // Holds the ThreadState. May be changed non-atomically between Suspended (i.e. not Runnable)
1431        // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
1432        // operation. If a thread is suspended and a suspend_request is present, a thread may not
1433        // change to Runnable as a GC or other operation is in progress.
1434        volatile uint16_t state;
1435      } as_struct;
1436      AtomicInteger as_atomic_int;
1437      volatile int32_t as_int;
1438  
1439     private:
1440      // gcc does not handle struct with volatile member assignments correctly.
1441      // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
1442      DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
1443    };
1444    static_assert(sizeof(StateAndFlags) == sizeof(int32_t), "Weird state_and_flags size");
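
  // Conceptual sketch of why the union is kept at 32 bits (the real transition logic lives in the
  // Thread state-change paths, not here): flags and state are snapshotted and CAS'd together, so a
  // suspend request that races with a Suspended -> Runnable transition cannot be lost.
  //
  //   union StateAndFlags old_state_and_flags;
  //   old_state_and_flags.as_int = tls32_.state_and_flags.as_int;   // Snapshot flags + state.
  //   union StateAndFlags new_state_and_flags;
  //   new_state_and_flags.as_int = old_state_and_flags.as_int;
  //   new_state_and_flags.as_struct.state = kRunnable;              // Change state, keep flags.
  //   bool success = tls32_.state_and_flags.as_atomic_int.CompareAndSetWeakAcquire(
  //       old_state_and_flags.as_int, new_state_and_flags.as_int);  // Fails if a flag raced in.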
1445  
1446    static void ThreadExitCallback(void* arg);
1447  
1448    // Maximum number of suspend barriers.
1449    static constexpr uint32_t kMaxSuspendBarriers = 3;
1450  
1451    // Has Thread::Startup been called?
1452    static bool is_started_;
1453  
1454    // TLS key used to retrieve the Thread*.
1455    static pthread_key_t pthread_key_self_;
1456  
1457    // Used to notify threads that they should attempt to resume; they will suspend again if
1458    // their suspend count is > 0.
1459    static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);
1460  
1461    // Hook passed by framework which returns true
1462    // when StrictMode events are traced for the current thread.
1463    static bool (*is_sensitive_thread_hook_)();
1464    // Stores the jit sensitive thread (which for now is the UI thread).
1465    static Thread* jit_sensitive_thread_;
1466  
1467    /***********************************************************************************************/
1468    // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
1469    // pointer size differences. To encourage shorter encoding, more frequently used values appear
1470    // first if possible.
1471    /***********************************************************************************************/
1472  
1473    struct PACKED(4) tls_32bit_sized_values {
1474      // We have no control over the size of 'bool', but want our boolean fields
1475      // to be 4-byte quantities.
1476      typedef uint32_t bool32_t;
1477  
1478      explicit tls_32bit_sized_values(bool is_daemon) :
1479        suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
1480        daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
1481        thread_exit_check_count(0), handling_signal_(false),
1482        is_transitioning_to_runnable(false), ready_for_debug_invoke(false),
1483        debug_method_entry_(false), is_gc_marking(false), weak_ref_access_enabled(true),
1484        disable_thread_flip_count(0), user_code_suspend_count(0), force_interpreter_count(0) {
1485      }
1486  
1487      union StateAndFlags state_and_flags;
1488      static_assert(sizeof(union StateAndFlags) == sizeof(int32_t),
1489                    "Size of state_and_flags and int32 are different");
1490  
1491      // A non-zero value is used to tell the current thread to enter a safe point
1492      // at the next poll.
1493      int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1494  
1495      // How much of 'suspend_count_' is by request of the debugger, used to set things right
1496      // when the debugger detaches. Must be <= suspend_count_.
1497      int debug_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1498  
1499      // Thin lock thread id. This is a small integer used by the thin lock implementation.
1500      // This is not to be confused with the native thread's tid, nor is it the value returned
1501      // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
1502      // important difference between this id and the ids visible to managed code is that these
1503      // ones get reused (to ensure that they fit in the number of bits available).
1504      uint32_t thin_lock_thread_id;
1505  
1506      // System thread id.
1507      uint32_t tid;
1508  
1509      // Is the thread a daemon?
1510      const bool32_t daemon;
1511  
1512      // A boolean telling us whether we're recursively throwing OOME.
1513      bool32_t throwing_OutOfMemoryError;
1514  
1515      // A positive value implies we're in a region where thread suspension isn't expected.
1516      uint32_t no_thread_suspension;
1517  
1518      // How many times has our pthread key's destructor been called?
1519      uint32_t thread_exit_check_count;
1520  
1521      // True if signal is being handled by this thread.
1522      bool32_t handling_signal_;
1523  
1524      // True if the thread is in TransitionFromSuspendedToRunnable(). This is used to distinguish the
1525      // non-runnable threads (e.g. kNative, kWaiting) that are about to transition to runnable from
1526      // the rest of them.
1527      bool32_t is_transitioning_to_runnable;
1528  
1529      // True if the thread has been suspended by a debugger event. This is
1530      // used to invoke method from the debugger which is only allowed when
1531      // the thread is suspended by an event.
1532      bool32_t ready_for_debug_invoke;
1533  
1534      // True if the thread enters a method. This is used to detect method entry
1535      // event for the debugger.
1536      bool32_t debug_method_entry_;
1537  
1538      // True if the GC is in the marking phase. This is used for the CC collector only. This is
1539      // thread local so that we can simplify the logic to check for the fast path of read barriers of
1540      // GC roots.
1541      bool32_t is_gc_marking;
1542  
1543      // Thread "interrupted" status; stays raised until queried or thrown.
1544      Atomic<bool32_t> interrupted;
1545  
1546      AtomicInteger park_state_;
1547  
1548      // True if the thread is allowed to access a weak ref (Reference::GetReferent() and system
1549      // weaks) and to potentially mark an object alive/gray. This is used for concurrent reference
1550      // processing of the CC collector only. This is thread local so that we can enable/disable weak
1551      // ref access by using a checkpoint and avoid a race around the time weak ref access gets
1552      // disabled and concurrent reference processing begins (if weak ref access is disabled during a
1553      // pause, this is not an issue.) Other collectors use Runtime::DisallowNewSystemWeaks() and
1554      // ReferenceProcessor::EnableSlowPath().
1555      bool32_t weak_ref_access_enabled;
1556  
1557      // A thread local version of Heap::disable_thread_flip_count_. This keeps track of how many
1558      // levels of (nested) JNI critical sections the thread is in and is used to detect a nested JNI
1559      // critical section enter.
1560      uint32_t disable_thread_flip_count;
1561  
1562      // How much of 'suspend_count_' is by request of user code, used to distinguish threads
1563      // suspended by the runtime from those suspended by user code.
1564      // This should have GUARDED_BY(Locks::user_code_suspension_lock_) but auto analysis cannot be
1565      // told that AssertHeld should be good enough.
1566      int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1567  
1568      // Count of how many times this thread has been forced to the interpreter. If this is not 0, the
1569      // thread must remain in interpreted code as much as possible.
1570      uint32_t force_interpreter_count;
1571  
1572      // True if everything is in the ideal state for fast interpretation.
1573      // False if we need to switch to the C++ interpreter to handle special cases.
1574      std::atomic<bool32_t> use_mterp;
1575    } tls32_;
1576  
1577    struct PACKED(8) tls_64bit_sized_values {
1578      tls_64bit_sized_values() : trace_clock_base(0) {
1579      }
1580  
1581      // The clock base used for tracing.
1582      uint64_t trace_clock_base;
1583  
1584      RuntimeStats stats;
1585    } tls64_;
1586  
1587    struct PACKED(sizeof(void*)) tls_ptr_sized_values {
1588        tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
1589        managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), tmp_jni_env(nullptr),
1590        self(nullptr), opeer(nullptr), jpeer(nullptr), stack_begin(nullptr), stack_size(0),
1591        deps_or_stack_trace_sample(), wait_next(nullptr), monitor_enter_object(nullptr),
1592        top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
1593        instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
1594        stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr),
1595        frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
1596        last_no_thread_suspension_cause(nullptr), checkpoint_function(nullptr),
1597        thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
1598        thread_local_limit(nullptr),
1599        thread_local_objects(0), mterp_current_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
1600        thread_local_alloc_stack_end(nullptr),
1601        flip_function(nullptr), method_verifier(nullptr), thread_local_mark_stack(nullptr),
1602        async_exception(nullptr) {
1603        std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
1604      }
1605  
1606      // The biased card table, see CardTable for details.
1607      uint8_t* card_table;
1608  
1609      // The pending exception or null.
1610      mirror::Throwable* exception;
1611  
1612      // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
1613      // We leave extra space so there's room for the code that throws StackOverflowError.
1614      uint8_t* stack_end;
1615  
1616      // The top of the managed stack often manipulated directly by compiler generated code.
1617      ManagedStack managed_stack;
1618  
1619      // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check.  It is
1620      // normally set to the address of itself.
1621      uintptr_t* suspend_trigger;
1622  
1623      // Every thread may have an associated JNI environment
1624      JNIEnvExt* jni_env;
1625  
1626      // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the
1627      // created thread.
1628      JNIEnvExt* tmp_jni_env;
1629  
1630      // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
1631      // is easy but getting the address of Thread::Current is hard. This field can be read off of
1632      // Thread::Current to give the address.
1633      Thread* self;
1634  
1635      // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
1636      // start up, until the thread is registered and the local opeer_ is used.
1637      mirror::Object* opeer;
1638      jobject jpeer;
1639  
1640      // The "lowest addressable byte" of the stack.
1641      uint8_t* stack_begin;
1642  
1643      // Size of the stack.
1644      size_t stack_size;
1645  
1646      // Sampling profiler and AOT verification cannot happen on the same run, so we share
1647      // the same entry for the stack trace and the verifier deps.
1648      union DepsOrStackTraceSample {
1649        DepsOrStackTraceSample() {
1650          verifier_deps = nullptr;
1651          stack_trace_sample = nullptr;
1652        }
1653        // Pointer to previous stack trace captured by sampling profiler.
1654        std::vector<ArtMethod*>* stack_trace_sample;
1655        // When doing AOT verification, per-thread VerifierDeps.
1656        verifier::VerifierDeps* verifier_deps;
1657      } deps_or_stack_trace_sample;
1658  
1659      // The next thread in the wait set this thread is part of or null if not waiting.
1660      Thread* wait_next;
1661  
1662      // If we're blocked in MonitorEnter, this is the object we're trying to lock.
1663      mirror::Object* monitor_enter_object;
1664  
1665      // Top of linked list of handle scopes or null for none.
1666      BaseHandleScope* top_handle_scope;
1667  
1668      // Needed to get the right ClassLoader in JNI_OnLoad, but also
1669      // useful for testing.
1670      jobject class_loader_override;
1671  
1672      // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
1673      Context* long_jump_context;
1674  
1675      // Additional stack used by method instrumentation to store method and return pc values.
1676      // Stored as a pointer since std::deque is not PACKED.
1677      std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack;
1678  
1679      // JDWP invoke-during-breakpoint support.
1680      DebugInvokeReq* debug_invoke_req;
1681  
1682      // JDWP single-stepping support.
1683      SingleStepControl* single_step_control;
1684  
1685      // For gc purpose, a shadow frame record stack that keeps track of:
1686      // 1) shadow frames under construction.
1687      // 2) deoptimization shadow frames.
1688      StackedShadowFrameRecord* stacked_shadow_frame_record;
1689  
1690      // Deoptimization return value record stack.
1691      DeoptimizationContextRecord* deoptimization_context_stack;
1692  
1693      // For debugger, a linked list that keeps the mapping from frame_id to shadow frame.
1694      // Shadow frames may be created before deoptimization happens so that the debugger can
1695      // set local values there first.
1696      FrameIdToShadowFrame* frame_id_to_shadow_frame;
1697  
1698      // A cached copy of the java.lang.Thread's name.
1699      std::string* name;
1700  
1701      // A cached pthread_t for the pthread underlying this Thread*.
1702      pthread_t pthread_self;
1703  
1704      // If no_thread_suspension_ is > 0, what is causing that assertion.
1705      const char* last_no_thread_suspension_cause;
1706  
1707      // Pending checkpoint function or null if non-pending. If this checkpoint is set and someone
1708      // requests another checkpoint, it goes to the checkpoint overflow list.
1709      Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);
1710  
1711      // Pending barriers that require passing or NULL if non-pending. Installation guarded by
1712      // Locks::thread_suspend_count_lock_.
1713      // They work effectively as art::Barrier, but implemented directly using AtomicInteger and futex
1714      // to avoid additional cost of a mutex and a condition variable, as used in art::Barrier.
1715      AtomicInteger* active_suspend_barriers[kMaxSuspendBarriers];
1716  
1717      // Thread-local allocation pointer. Moved here to force alignment for thread_local_pos on ARM.
1718      uint8_t* thread_local_start;
1719  
1720      // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for
1721      // potentially better performance.
1722      uint8_t* thread_local_pos;
1723      uint8_t* thread_local_end;
1724  
1725      // Thread local limit is how far we can expand the thread local buffer; it is greater than or
1726      // equal to thread_local_end.
1727      uint8_t* thread_local_limit;
1728  
1729      size_t thread_local_objects;
1730  
1731      // Entrypoint function pointers.
1732      // TODO: move this to more of a global offset table model to avoid per-thread duplication.
1733      JniEntryPoints jni_entrypoints;
1734      QuickEntryPoints quick_entrypoints;
1735  
1736      // Mterp jump table base.
1737      void* mterp_current_ibase;
1738  
1739      // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
1740      void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];
1741  
1742      // Thread-local allocation stack data/routines.
1743      StackReference<mirror::Object>* thread_local_alloc_stack_top;
1744      StackReference<mirror::Object>* thread_local_alloc_stack_end;
1745  
1746      // Support for Mutex lock hierarchy bug detection.
1747      BaseMutex* held_mutexes[kLockLevelCount];
1748  
1749      // The function used for thread flip.
1750      Closure* flip_function;
1751  
1752      // Current method verifier, used for root marking.
1753      verifier::MethodVerifier* method_verifier;
1754  
1755      // Thread-local mark stack for the concurrent copying collector.
1756      gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;
1757  
1758      // The pending async-exception or null.
1759      mirror::Throwable* async_exception;
1760    } tlsPtr_;
1761  
1762    // Small thread-local cache to be used from the interpreter.
1763    // It is keyed by dex instruction pointer.
1764    // The value is opcode-dependent (e.g. field offset).
1765    InterpreterCache interpreter_cache_;
1766  
1767    // All fields below this line should not be accessed by native code. This means these fields can
1768    // be modified, rearranged, added or removed without having to modify asm_support.h.
1769  
1770    // Guards the 'wait_monitor_' members.
1771    Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1772  
1773    // Condition variable waited upon during a wait.
1774    ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
1775    // Pointer to the monitor lock we're currently waiting on or null if not waiting.
1776    Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
1777  
1778    // Debug disable read barrier count; only checked in debug builds and only in the runtime.
1779    uint8_t debug_disallow_read_barrier_ = 0;
1780  
1781    // Note that it is not in the packed struct; it may not be accessed for cross compilation.
1782    uintptr_t poison_object_cookie_ = 0;
1783  
1784    // Pending extra checkpoints if checkpoint_function_ is already used.
1785    std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);
1786  
1787    // Custom TLS field that can be used by plugins or the runtime. Should not be accessed directly by
1788    // compiled code or entrypoints.
1789    SafeMap<std::string, std::unique_ptr<TLSData>> custom_tls_ GUARDED_BY(Locks::custom_tls_lock_);
1790  
1791    // True if the thread is some form of runtime thread (e.g. GC or JIT).
1792    bool is_runtime_thread_;
1793  
1794    friend class Dbg;  // For SetStateUnsafe.
1795    friend class gc::collector::SemiSpace;  // For getting stack traces.
1796    friend class Runtime;  // For CreatePeer.
1797    friend class QuickExceptionHandler;  // For dumping the stack.
1798    friend class ScopedThreadStateChange;
1799    friend class StubTest;  // For accessing entrypoints.
1800    friend class ThreadList;  // For ~Thread and Destroy.
1801  
1802    friend class EntrypointsOrderTest;  // To test the order of tls entries.
1803  
1804    DISALLOW_COPY_AND_ASSIGN(Thread);
1805  };
1806  
1807  class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
1808   public:
1809    ALWAYS_INLINE ScopedAssertNoThreadSuspension(const char* cause,
1810                                                 bool enabled = true)
1811        ACQUIRE(Roles::uninterruptible_)
1812        : enabled_(enabled) {
1813      if (!enabled_) {
1814        return;
1815      }
1816      if (kIsDebugBuild) {
1817        self_ = Thread::Current();
1818        old_cause_ = self_->StartAssertNoThreadSuspension(cause);
1819      } else {
1820        Roles::uninterruptible_.Acquire();  // No-op.
1821      }
1822    }
1823    ALWAYS_INLINE ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) {
1824      if (!enabled_) {
1825        return;
1826      }
1827      if (kIsDebugBuild) {
1828        self_->EndAssertNoThreadSuspension(old_cause_);
1829      } else {
1830        Roles::uninterruptible_.Release();  // No-op.
1831      }
1832    }
1833  
1834   private:
1835    Thread* self_;
1836    const bool enabled_;
1837    const char* old_cause_;
1838  };
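
// Illustrative usage (a sketch; the cause string is arbitrary):
//
//   {
//     ScopedAssertNoThreadSuspension sants("Accessing raw object pointers");
//     // ... code that must not suspend; in debug builds a suspension attempt here aborts with
//     // the given cause.
//   }  // The assertion (and the uninterruptible role) is released here.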
1839  
1840  class ScopedStackedShadowFramePusher {
1841   public:
1842    ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf, StackedShadowFrameType type)
1843      : self_(self), type_(type) {
1844      self_->PushStackedShadowFrame(sf, type);
1845    }
1846    ~ScopedStackedShadowFramePusher() {
1847      self_->PopStackedShadowFrame(type_);
1848    }
1849  
1850   private:
1851    Thread* const self_;
1852    const StackedShadowFrameType type_;
1853  
1854    DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
1855  };
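
// Illustrative usage (a sketch; `self` and `shadow_frame` are assumed to be in scope):
//
//   ScopedStackedShadowFramePusher pusher(
//       self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
//   // The frame is popped from the record automatically when `pusher` goes out of scope.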
1856  
1857  // Only works for debug builds.
1858  class ScopedDebugDisallowReadBarriers {
1859   public:
1860    explicit ScopedDebugDisallowReadBarriers(Thread* self) : self_(self) {
1861      self_->ModifyDebugDisallowReadBarrier(1);
1862    }
1863    ~ScopedDebugDisallowReadBarriers() {
1864      self_->ModifyDebugDisallowReadBarrier(-1);
1865    }
1866  
1867   private:
1868    Thread* const self_;
1869  };
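
// Illustrative usage (a sketch): bracket a region in which read barriers must not run; the
// underlying count is only checked in debug builds.
//
//   {
//     ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
//     // ... code that must not trigger read barriers.
//   }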
1870  
1871  class ScopedTransitioningToRunnable : public ValueObject {
1872   public:
1873    explicit ScopedTransitioningToRunnable(Thread* self)
1874        : self_(self) {
1875      DCHECK_EQ(self, Thread::Current());
1876      if (kUseReadBarrier) {
1877        self_->SetIsTransitioningToRunnable(true);
1878      }
1879    }
1880  
1881    ~ScopedTransitioningToRunnable() {
1882      if (kUseReadBarrier) {
1883        self_->SetIsTransitioningToRunnable(false);
1884      }
1885    }
1886  
1887   private:
1888    Thread* const self_;
1889  };
1890  
1891  class ThreadLifecycleCallback {
1892   public:
1893    virtual ~ThreadLifecycleCallback() {}
1894  
1895    virtual void ThreadStart(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
1896    virtual void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
1897  };
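
// Minimal sketch of a callback implementation (hypothetical; registering it with the runtime's
// callback list is outside the scope of this header):
//
//   class LoggingThreadCallback : public ThreadLifecycleCallback {
//    public:
//     void ThreadStart(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_) {
//       LOG(INFO) << "Thread started: " << *self;
//     }
//     void ThreadDeath(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_) {
//       LOG(INFO) << "Thread dying: " << *self;
//     }
//   };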
1898  
1899  std::ostream& operator<<(std::ostream& os, const Thread& thread);
1900  std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& thread);
1901  
1902  }  // namespace art
1903  
1904  #endif  // ART_RUNTIME_THREAD_H_
1905