1  /*
2   * Copyright (C) 2011 The Android Open Source Project
3   *
4   * Licensed under the Apache License, Version 2.0 (the "License");
5   * you may not use this file except in compliance with the License.
6   * You may obtain a copy of the License at
7   *
8   *      http://www.apache.org/licenses/LICENSE-2.0
9   *
10   * Unless required by applicable law or agreed to in writing, software
11   * distributed under the License is distributed on an "AS IS" BASIS,
12   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13   * See the License for the specific language governing permissions and
14   * limitations under the License.
15   */
16  
17  #ifndef ART_RUNTIME_THREAD_H_
18  #define ART_RUNTIME_THREAD_H_
19  
20  #include <setjmp.h>
21  
22  #include <bitset>
23  #include <deque>
24  #include <iosfwd>
25  #include <list>
26  #include <memory>
27  #include <string>
28  
29  #include "arch/context.h"
30  #include "arch/instruction_set.h"
31  #include "base/atomic.h"
32  #include "base/enums.h"
33  #include "base/macros.h"
34  #include "base/mutex.h"
35  #include "entrypoints/jni/jni_entrypoints.h"
36  #include "entrypoints/quick/quick_entrypoints.h"
37  #include "globals.h"
38  #include "handle_scope.h"
39  #include "instrumentation.h"
40  #include "jvalue.h"
41  #include "managed_stack.h"
42  #include "offsets.h"
43  #include "read_barrier_config.h"
44  #include "runtime_stats.h"
45  #include "suspend_reason.h"
46  #include "thread_state.h"
47  
48  class BacktraceMap;
49  
50  namespace art {
51  
52  namespace gc {
53  namespace accounting {
54  template<class T> class AtomicStack;
55  }  // namespace accounting
56  namespace collector {
57  class SemiSpace;
58  }  // namespace collector
59  }  // namespace gc
60  
61  namespace mirror {
62  class Array;
63  class Class;
64  class ClassLoader;
65  class Object;
66  template<class T> class ObjectArray;
67  template<class T> class PrimitiveArray;
68  typedef PrimitiveArray<int32_t> IntArray;
69  class StackTraceElement;
70  class String;
71  class Throwable;
72  }  // namespace mirror
73  
74  namespace verifier {
75  class MethodVerifier;
76  class VerifierDeps;
77  }  // namespace verifier
78  
79  class ArtMethod;
80  class BaseMutex;
81  class ClassLinker;
82  class Closure;
83  class Context;
84  struct DebugInvokeReq;
85  class DeoptimizationContextRecord;
86  class DexFile;
87  class FrameIdToShadowFrame;
88  class JavaVMExt;
89  class JNIEnvExt;
90  class Monitor;
91  class RootVisitor;
92  class ScopedObjectAccessAlreadyRunnable;
93  class ShadowFrame;
94  class SingleStepControl;
95  class StackedShadowFrameRecord;
96  class Thread;
97  class ThreadList;
98  enum VisitRootFlags : uint8_t;
99  
100  // Thread priorities. These must match the Thread.MIN_PRIORITY,
101  // Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
102  enum ThreadPriority {
103    kMinThreadPriority = 1,
104    kNormThreadPriority = 5,
105    kMaxThreadPriority = 10,
106  };
107  
108  enum ThreadFlag {
109    kSuspendRequest   = 1,  // If set implies that suspend_count_ > 0 and the Thread should enter the
110                            // safepoint handler.
111    kCheckpointRequest = 2,  // Request that the thread do some checkpoint work and then continue.
112    kEmptyCheckpointRequest = 4,  // Request that the thread do empty checkpoint and then continue.
113    kActiveSuspendBarrier = 8,  // Register that at least 1 suspend barrier needs to be passed.
114  };
115  
116  enum class StackedShadowFrameType {
117    kShadowFrameUnderConstruction,
118    kDeoptimizationShadowFrame,
119  };
120  
121  // The type of method that triggers deoptimization. It contains info on whether
122  // the deoptimized method should advance dex_pc.
123  enum class DeoptimizationMethodType {
124    kKeepDexPc,  // dex pc is required to be kept upon deoptimization.
125    kDefault     // dex pc may or may not advance depending on other conditions.
126  };
127  
128  // This should match RosAlloc::kNumThreadLocalSizeBrackets.
129  static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
130  
131  // Thread's stack layout for implicit stack overflow checks:
132  //
133  //   +---------------------+  <- highest address of stack memory
134  //   |                     |
135  //   .                     .  <- SP
136  //   |                     |
137  //   |                     |
138  //   +---------------------+  <- stack_end
139  //   |                     |
140  //   |  Gap                |
141  //   |                     |
142  //   +---------------------+  <- stack_begin
143  //   |                     |
144  //   | Protected region    |
145  //   |                     |
146  //   +---------------------+  <- lowest address of stack memory
147  //
148  // The stack always grows down in memory.  At the lowest address is a region of memory
149  // that is set mprotect(PROT_NONE).  Any attempt to read/write to this region will
150  // result in a segmentation fault signal.  At any point, the thread's SP will be somewhere
151  // between the stack_end and the highest address in stack memory.  An implicit stack
152  // overflow check is a read of memory at a certain offset below the current SP (4K typically).
153  // If the thread's SP is below the stack_end address this will be a read into the protected
154  // region.  If the SP is above the stack_end address, the thread is guaranteed to have
155  // at least 4K of space.  Because stack overflow checks are only performed in generated code,
156  // if the thread makes a call out to a native function (through JNI), that native function
157  // might only have 4K of memory (if the SP is adjacent to stack_end).
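//
// A minimal sketch of what the implicit check amounts to (illustrative only, not the
// literal emitted code; the probe distance and instruction sequence are architecture-
// and compiler-specific):
//
//   // At method entry, generated code effectively does:
//   //   touch(SP - probe_offset);   // e.g. ~4K below SP
//   // If that address falls inside the protected region, the resulting SIGSEGV is
//   // recognized by the runtime's fault handling and surfaces as a StackOverflowError
//   // rather than a crash.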
158  
159  class Thread {
160   public:
161    static const size_t kStackOverflowImplicitCheckSize;
162    static constexpr bool kVerifyStack = kIsDebugBuild;
163  
164    // Creates a new native thread corresponding to the given managed peer.
165    // Used to implement Thread.start.
166    static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
167  
168    // Attaches the calling native thread to the runtime, returning the new native peer.
169    // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
170    static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
171                          bool create_peer);
172    // Attaches the calling native thread to the runtime, returning the new native peer.
173    static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_peer);
174  
175    // Reset internal state of child thread after fork.
176    void InitAfterFork();
177  
178    // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
179    // high cost and so we favor passing self around when possible.
180    // TODO: mark as PURE so the compiler may coalesce and remove?
181    static Thread* Current();
182  
183    // On a runnable thread, check for pending thread suspension request and handle if pending.
184    void AllowThreadSuspension() REQUIRES_SHARED(Locks::mutator_lock_);
185  
186    // Process pending thread suspension request and handle if pending.
187    void CheckSuspend() REQUIRES_SHARED(Locks::mutator_lock_);
188  
189    // Process a pending empty checkpoint if pending.
190    void CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex);
191    void CheckEmptyCheckpointFromMutex();
192  
193    static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
194                                     ObjPtr<mirror::Object> thread_peer)
195        REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
196        REQUIRES_SHARED(Locks::mutator_lock_);
197    static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
198        REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
199        REQUIRES_SHARED(Locks::mutator_lock_);
200  
201    // Translates 172 to pAllocArrayFromCode and so on.
202    template<PointerSize size_of_pointers>
203    static void DumpThreadOffset(std::ostream& os, uint32_t offset);
204  
205    // Dumps a one-line summary of thread state (used for operator<<).
206    void ShortDump(std::ostream& os) const;
207  
208    // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
209    void Dump(std::ostream& os,
210              bool dump_native_stack = true,
211              BacktraceMap* backtrace_map = nullptr,
212              bool force_dump_stack = false) const
213        REQUIRES(!Locks::thread_suspend_count_lock_)
214        REQUIRES_SHARED(Locks::mutator_lock_);
215  
216    void DumpJavaStack(std::ostream& os,
217                       bool check_suspended = true,
218                       bool dump_locks = true) const
219        REQUIRES(!Locks::thread_suspend_count_lock_)
220        REQUIRES_SHARED(Locks::mutator_lock_);
221  
222    // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
223    // case we use 'tid' to identify the thread, and we'll include as much information as we can.
224    static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
225        REQUIRES(!Locks::thread_suspend_count_lock_)
226        REQUIRES_SHARED(Locks::mutator_lock_);
227  
228    ThreadState GetState() const {
229      DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
230      DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
231      return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
232    }
233  
234    ThreadState SetState(ThreadState new_state);
235  
236    int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
237      return tls32_.suspend_count;
238    }
239  
240    int GetUserCodeSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_,
241                                                 Locks::user_code_suspension_lock_) {
242      return tls32_.user_code_suspend_count;
243    }
244  
245    int GetDebugSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
246      return tls32_.debug_suspend_count;
247    }
248  
249    bool IsSuspended() const {
250      union StateAndFlags state_and_flags;
251      state_and_flags.as_int = tls32_.state_and_flags.as_int;
252      return state_and_flags.as_struct.state != kRunnable &&
253          (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
254    }
255  
256    // If delta > 0 and (this != self or suspend_barrier is not null), this function may temporarily
257    // release thread_suspend_count_lock_ internally.
258    ALWAYS_INLINE
259    bool ModifySuspendCount(Thread* self,
260                            int delta,
261                            AtomicInteger* suspend_barrier,
262                            SuspendReason reason)
263        WARN_UNUSED
264        REQUIRES(Locks::thread_suspend_count_lock_);
265  
266    // Requests a checkpoint closure to run on another thread. The closure will be run when the thread
267    // gets suspended. This will return true if the closure was added and will (eventually) be
268    // executed. It returns false otherwise.
269    //
270    // Since multiple closures can be queued and some closures can delay other threads from running,
271    // no closure should attempt to suspend another thread while running.
272    // TODO We should add some debug option that verifies this.
273    bool RequestCheckpoint(Closure* function)
274        REQUIRES(Locks::thread_suspend_count_lock_);
275  
276    // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This is
277    // due to the fact that Thread::Current() needs to go to sleep to allow the targeted thread to
278    // execute the checkpoint for us if it is Runnable. The suspend_state is the state that the thread
279    // will go into while it is awaiting the checkpoint to be run.
280    // NB Passing ThreadState::kRunnable may cause the current thread to wait in a condition variable
281    // while holding the mutator_lock_.  Callers should ensure that this will not cause any problems
282    // for the closure or the rest of the system.
283    // NB Since multiple closures can be queued and some closures can delay other threads from running,
284    // no closure should attempt to suspend another thread while running.
285    bool RequestSynchronousCheckpoint(Closure* function,
286                                      ThreadState suspend_state = ThreadState::kWaiting)
287        REQUIRES_SHARED(Locks::mutator_lock_)
288        RELEASE(Locks::thread_list_lock_)
289        REQUIRES(!Locks::thread_suspend_count_lock_);
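  // Hedged usage sketch (the names below are illustrative, not declared in this header):
  // the caller locates the target while holding thread_list_lock_, which this call then
  // releases, per the RELEASE annotation above.
  //
  //   // ... with Locks::thread_list_lock_ exclusively held ...
  //   // Thread* target = ...;            // looked up in the thread list
  //   // MyClosure closure;               // hypothetical Closure subclass
  //   // target->RequestSynchronousCheckpoint(&closure);  // releases thread_list_lock_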
290  
291    bool RequestEmptyCheckpoint()
292        REQUIRES(Locks::thread_suspend_count_lock_);
293  
294    void SetFlipFunction(Closure* function);
295    Closure* GetFlipFunction();
296  
297    gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
298      CHECK(kUseReadBarrier);
299      return tlsPtr_.thread_local_mark_stack;
300    }
301    void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
302      CHECK(kUseReadBarrier);
303      tlsPtr_.thread_local_mark_stack = stack;
304    }
305  
306    // Called when the thread detects that thread_suspend_count_ is non-zero. Gives up its share of
307    // mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
308    void FullSuspendCheck()
309        REQUIRES(!Locks::thread_suspend_count_lock_)
310        REQUIRES_SHARED(Locks::mutator_lock_);
311  
312    // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
313    ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable()
314        REQUIRES(!Locks::thread_suspend_count_lock_)
315        SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
316  
317    // Transition from runnable into a state where mutator privileges are denied. Releases share of
318    // mutator lock.
319    ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
320        REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
321        UNLOCK_FUNCTION(Locks::mutator_lock_);
322  
323    // Once called, thread suspension will cause an assertion failure.
324    const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
325      Roles::uninterruptible_.Acquire();  // No-op.
326      if (kIsDebugBuild) {
327        CHECK(cause != nullptr);
328        const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
329        tls32_.no_thread_suspension++;
330        tlsPtr_.last_no_thread_suspension_cause = cause;
331        return previous_cause;
332      } else {
333        return nullptr;
334      }
335    }
336  
337    // End region where no thread suspension is expected.
338    void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) {
339      if (kIsDebugBuild) {
340        CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
341        CHECK_GT(tls32_.no_thread_suspension, 0U);
342        tls32_.no_thread_suspension--;
343        tlsPtr_.last_no_thread_suspension_cause = old_cause;
344      }
345      Roles::uninterruptible_.Release();  // No-op.
346    }
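  // Minimal usage sketch for the pair above (illustrative; "MyReason" is a placeholder):
  // bracket a region that must not reach a suspend point and restore the previous cause.
  //
  //   // const char* old_cause = self->StartAssertNoThreadSuspension("MyReason");
  //   // ... work that must not suspend ...
  //   // self->EndAssertNoThreadSuspension(old_cause);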
347  
348    void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
349  
350    // Return true if thread suspension is allowable.
351    bool IsThreadSuspensionAllowable() const;
352  
353    bool IsDaemon() const {
354      return tls32_.daemon;
355    }
356  
357    size_t NumberOfHeldMutexes() const;
358  
359    bool HoldsLock(ObjPtr<mirror::Object> object) const REQUIRES_SHARED(Locks::mutator_lock_);
360  
361    /*
362     * Changes the priority of this thread to match that of the java.lang.Thread object.
363     *
364     * We map a priority value from 1-10 to Linux "nice" values, where lower
365     * numbers indicate higher priority.
366     */
367    void SetNativePriority(int newPriority);
368  
369    /*
370     * Returns the thread priority for the current thread by querying the system.
371     * This is useful when attaching a thread through JNI.
372     *
373     * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
374     */
375    static int GetNativePriority();
376  
377    // Guaranteed to be non-zero.
378    uint32_t GetThreadId() const {
379      return tls32_.thin_lock_thread_id;
380    }
381  
382    pid_t GetTid() const {
383      return tls32_.tid;
384    }
385  
386    // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
387    mirror::String* GetThreadName() const REQUIRES_SHARED(Locks::mutator_lock_);
388  
389    // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
390    // allocation, or locking.
391    void GetThreadName(std::string& name) const;
392  
393    // Sets the thread's name.
394    void SetThreadName(const char* name) REQUIRES_SHARED(Locks::mutator_lock_);
395  
396    // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
397    uint64_t GetCpuMicroTime() const;
398  
399    mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
400      DCHECK(Thread::Current() == this) << "Use GetPeerFromOtherThread instead";
401      CHECK(tlsPtr_.jpeer == nullptr);
402      return tlsPtr_.opeer;
403    }
404    // GetPeer is not safe if called on another thread in the middle of the CC thread flip: the
405    // thread's stack may not have been flipped yet, so the peer may be a from-space (stale) ref.
406    // This function will explicitly mark/forward it.
407    mirror::Object* GetPeerFromOtherThread() const REQUIRES_SHARED(Locks::mutator_lock_);
408  
409    bool HasPeer() const {
410      return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
411    }
412  
413    RuntimeStats* GetStats() {
414      return &tls64_.stats;
415    }
416  
417    bool IsStillStarting() const;
418  
419    bool IsExceptionPending() const {
420      return tlsPtr_.exception != nullptr;
421    }
422  
423    bool IsAsyncExceptionPending() const {
424      return tlsPtr_.async_exception != nullptr;
425    }
426  
427    mirror::Throwable* GetException() const REQUIRES_SHARED(Locks::mutator_lock_) {
428      return tlsPtr_.exception;
429    }
430  
431    void AssertPendingException() const;
432    void AssertPendingOOMException() const REQUIRES_SHARED(Locks::mutator_lock_);
433    void AssertNoPendingException() const;
434    void AssertNoPendingExceptionForNewException(const char* msg) const;
435  
436    void SetException(ObjPtr<mirror::Throwable> new_exception) REQUIRES_SHARED(Locks::mutator_lock_);
437  
438    // Set an exception that is asynchronously thrown from a different thread. This will be checked
439    // periodically and might overwrite the current 'Exception'. This can only be called from a
440    // checkpoint.
441    //
442    // The caller should also make sure that the thread has been deoptimized so that the exception
443    // could be detected on back-edges.
444    void SetAsyncException(ObjPtr<mirror::Throwable> new_exception)
445        REQUIRES_SHARED(Locks::mutator_lock_);
446  
447    void ClearException() REQUIRES_SHARED(Locks::mutator_lock_) {
448      tlsPtr_.exception = nullptr;
449    }
450  
451    // Move the current async-exception to the main exception. This should be called when the current
452    // thread is ready to deal with any async exceptions. Returns true if there is an async exception
453    // that needs to be dealt with, false otherwise.
454    bool ObserveAsyncException() REQUIRES_SHARED(Locks::mutator_lock_);
455  
456    // Finds the catch block and performs a long jump to the appropriate exception handler.
457    NO_RETURN void QuickDeliverException() REQUIRES_SHARED(Locks::mutator_lock_);
458  
459    Context* GetLongJumpContext();
460    void ReleaseLongJumpContext(Context* context) {
461      if (tlsPtr_.long_jump_context != nullptr) {
462        // Each QuickExceptionHandler gets a long jump context and uses
463        // it for doing the long jump, after finding catch blocks/doing deoptimization.
464        // Both finding catch blocks and deoptimization can trigger another
465        // exception such as a result of class loading. So there can be nested
466        // cases of exception handling and multiple contexts being used.
467        // ReleaseLongJumpContext tries to save the context in tlsPtr_.long_jump_context
468        // for reuse so there is no need to always allocate a new one each time when
469        // getting a context. Since we only keep one context for reuse, delete the
470        // existing one since the passed in context is yet to be used for longjump.
471        delete tlsPtr_.long_jump_context;
472      }
473      tlsPtr_.long_jump_context = context;
474    }
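  // Hedged sketch of the intended pairing (illustrative): callers obtain a context, use
  // it for exception delivery or deoptimization, and hand it back so it can be cached.
  //
  //   // Context* context = self->GetLongJumpContext();
  //   // ... fill in the context while walking to the catch handler ...
  //   // self->ReleaseLongJumpContext(context);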
475  
476    // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
477    // abort the runtime iff abort_on_error is true.
478    ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
479                                bool check_suspended = true,
480                                bool abort_on_error = true) const
481        REQUIRES_SHARED(Locks::mutator_lock_);
482  
483    // Returns whether the given exception was thrown by the current Java method being executed
484    // (Note that this includes native Java methods).
485    bool IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const
486        REQUIRES_SHARED(Locks::mutator_lock_);
487  
488    void SetTopOfStack(ArtMethod** top_method) {
489      tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
490    }
491  
492    void SetTopOfStackTagged(ArtMethod** top_method) {
493      tlsPtr_.managed_stack.SetTopQuickFrameTagged(top_method);
494    }
495  
496    void SetTopOfShadowStack(ShadowFrame* top) {
497      tlsPtr_.managed_stack.SetTopShadowFrame(top);
498    }
499  
500    bool HasManagedStack() const {
501      return tlsPtr_.managed_stack.HasTopQuickFrame() || tlsPtr_.managed_stack.HasTopShadowFrame();
502    }
503  
504    // If 'msg' is null, no detail message is set.
505    void ThrowNewException(const char* exception_class_descriptor, const char* msg)
506        REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
507  
508    // If 'msg' is null, no detail message is set. An exception must be pending, and will be
509    // used as the new exception's cause.
510    void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
511        REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
512  
513    void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
514        __attribute__((format(printf, 3, 4)))
515        REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
516  
517    void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
518        REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
519  
520    // OutOfMemoryError is special, because we need to pre-allocate an instance.
521    // Only the GC should call this.
522    void ThrowOutOfMemoryError(const char* msg) REQUIRES_SHARED(Locks::mutator_lock_)
523        REQUIRES(!Roles::uninterruptible_);
524  
525    static void Startup();
526    static void FinishStartup();
527    static void Shutdown();
528  
529    // Notify this thread's thread-group that this thread has started.
530    // Note: the given thread-group is used as a fast path and verified in debug build. If the value
531    //       is null, the thread's thread-group is loaded from the peer.
532    void NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group = nullptr)
533        REQUIRES_SHARED(Locks::mutator_lock_);
534  
535    // JNI methods
536    JNIEnvExt* GetJniEnv() const {
537      return tlsPtr_.jni_env;
538    }
539  
540    // Converts a jobject into an ObjPtr<mirror::Object>.
541    ObjPtr<mirror::Object> DecodeJObject(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
542    // Checks if the weak global ref has been cleared by the GC without decoding it.
543    bool IsJWeakCleared(jweak obj) const REQUIRES_SHARED(Locks::mutator_lock_);
544  
545    mirror::Object* GetMonitorEnterObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
546      return tlsPtr_.monitor_enter_object;
547    }
548  
549    void SetMonitorEnterObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
550      tlsPtr_.monitor_enter_object = obj;
551    }
552  
553    // Implements java.lang.Thread.interrupted.
554    bool Interrupted();
555    // Implements java.lang.Thread.isInterrupted.
556    bool IsInterrupted();
557    void Interrupt(Thread* self) REQUIRES(!*wait_mutex_);
558    void SetInterrupted(bool i) {
559      tls32_.interrupted.StoreSequentiallyConsistent(i);
560    }
561    void Notify() REQUIRES(!*wait_mutex_);
562  
563    ALWAYS_INLINE void PoisonObjectPointers() {
564      ++poison_object_cookie_;
565    }
566  
567    ALWAYS_INLINE static void PoisonObjectPointersIfDebug();
568  
569    ALWAYS_INLINE uintptr_t GetPoisonObjectCookie() const {
570      return poison_object_cookie_;
571    }
572  
573   private:
574    void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
575  
576   public:
577    Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
578      return wait_mutex_;
579    }
580  
581    ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) {
582      return wait_cond_;
583    }
584  
585    Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) {
586      return wait_monitor_;
587    }
588  
589    void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) {
590      wait_monitor_ = mon;
591    }
592  
593    // Waiter linked-list support.
594    Thread* GetWaitNext() const {
595      return tlsPtr_.wait_next;
596    }
597  
598    void SetWaitNext(Thread* next) {
599      tlsPtr_.wait_next = next;
600    }
601  
602    jobject GetClassLoaderOverride() {
603      return tlsPtr_.class_loader_override;
604    }
605  
606    void SetClassLoaderOverride(jobject class_loader_override);
607  
608    // Creates the internal representation of a stack trace, which is more time- and
609    // space-efficient to compute than a StackTraceElement[].
610    template<bool kTransactionActive>
611    jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
612        REQUIRES_SHARED(Locks::mutator_lock_);
613  
614    // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
615    // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
616    // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
617    // with the number of valid frames in the returned array.
618    static jobjectArray InternalStackTraceToStackTraceElementArray(
619        const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
620        jobjectArray output_array = nullptr, int* stack_depth = nullptr)
621        REQUIRES_SHARED(Locks::mutator_lock_);
622  
623    jobjectArray CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
624        REQUIRES_SHARED(Locks::mutator_lock_);
625  
626    bool HasDebuggerShadowFrames() const {
627      return tlsPtr_.frame_id_to_shadow_frame != nullptr;
628    }
629  
630    void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
631        REQUIRES_SHARED(Locks::mutator_lock_);
632  
633    void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_) {
634      if (kVerifyStack) {
635        VerifyStackImpl();
636      }
637    }
638  
639    //
640    // Offsets of various members of native Thread class, used by compiled code.
641    //
642  
643    template<PointerSize pointer_size>
644    static ThreadOffset<pointer_size> ThinLockIdOffset() {
645      return ThreadOffset<pointer_size>(
646          OFFSETOF_MEMBER(Thread, tls32_) +
647          OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
648    }
649  
650    template<PointerSize pointer_size>
651    static ThreadOffset<pointer_size> InterruptedOffset() {
652      return ThreadOffset<pointer_size>(
653          OFFSETOF_MEMBER(Thread, tls32_) +
654          OFFSETOF_MEMBER(tls_32bit_sized_values, interrupted));
655    }
656  
657    template<PointerSize pointer_size>
658    static ThreadOffset<pointer_size> ThreadFlagsOffset() {
659      return ThreadOffset<pointer_size>(
660          OFFSETOF_MEMBER(Thread, tls32_) +
661          OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
662    }
663  
664    template<PointerSize pointer_size>
665    static ThreadOffset<pointer_size> IsGcMarkingOffset() {
666      return ThreadOffset<pointer_size>(
667          OFFSETOF_MEMBER(Thread, tls32_) +
668          OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
669    }
670  
671    static constexpr size_t IsGcMarkingSize() {
672      return sizeof(tls32_.is_gc_marking);
673    }
674  
675    // Deoptimize the Java stack.
676    void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
677  
678   private:
679    template<PointerSize pointer_size>
680    static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
681      size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
682      size_t scale;
683      size_t shrink;
684      if (pointer_size == kRuntimePointerSize) {
685        scale = 1;
686        shrink = 1;
687      } else if (pointer_size > kRuntimePointerSize) {
688        scale = static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize);
689        shrink = 1;
690      } else {
691        DCHECK_GT(kRuntimePointerSize, pointer_size);
692        scale = 1;
693        shrink = static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size);
694      }
695      return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
696    }
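  // Worked example for the scaling above (illustrative, assuming only pointer-sized
  // members precede the slot): if the running runtime uses 32-bit pointers
  // (kRuntimePointerSize == k32) and pointer_size is k64, then scale == 2 and
  // shrink == 1, so a tlsPtr_ slot at byte offset 24 in the 32-bit layout maps to
  // base + 48 in the 64-bit layout; the reverse case divides by shrink instead.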
697  
698   public:
699    static uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
700                                                  PointerSize pointer_size) {
701      if (pointer_size == PointerSize::k32) {
702        return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset).
703            Uint32Value();
704      } else {
705        return QuickEntryPointOffset<PointerSize::k64>(quick_entrypoint_offset).
706            Uint32Value();
707      }
708    }
709  
710    template<PointerSize pointer_size>
711    static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
712      return ThreadOffsetFromTlsPtr<pointer_size>(
713          OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
714    }
715  
716    template<PointerSize pointer_size>
717    static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
718      return ThreadOffsetFromTlsPtr<pointer_size>(
719          OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
720    }
721  
722    // Return the entry point offset integer value for ReadBarrierMarkRegX, where X is `reg`.
723    template <PointerSize pointer_size>
724    static int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) {
725      // The entry point list defines 30 ReadBarrierMarkRegX entry points.
726      DCHECK_LT(reg, 30u);
727      // The ReadBarrierMarkRegX entry points are ordered by increasing
728      // register number in Thread::tls_Ptr_.quick_entrypoints.
729      return QUICK_ENTRYPOINT_OFFSET(pointer_size, pReadBarrierMarkReg00).Int32Value()
730          + static_cast<size_t>(pointer_size) * reg;
731    }
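  // Worked example (follows directly from the computation above): with 64-bit pointers
  // and reg == 3, the result is the offset of pReadBarrierMarkReg00 plus 3 * 8 bytes,
  // i.e. the slot holding pReadBarrierMarkReg03.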
732  
733    template<PointerSize pointer_size>
734    static ThreadOffset<pointer_size> SelfOffset() {
735      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
736    }
737  
738    template<PointerSize pointer_size>
739    static ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
740      return ThreadOffsetFromTlsPtr<pointer_size>(
741          OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase));
742    }
743  
744    template<PointerSize pointer_size>
745    static ThreadOffset<pointer_size> MterpDefaultIBaseOffset() {
746      return ThreadOffsetFromTlsPtr<pointer_size>(
747          OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_default_ibase));
748    }
749  
750    template<PointerSize pointer_size>
751    static ThreadOffset<pointer_size> MterpAltIBaseOffset() {
752      return ThreadOffsetFromTlsPtr<pointer_size>(
753          OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_alt_ibase));
754    }
755  
756    template<PointerSize pointer_size>
757    static ThreadOffset<pointer_size> ExceptionOffset() {
758      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
759    }
760  
761    template<PointerSize pointer_size>
762    static ThreadOffset<pointer_size> PeerOffset() {
763      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
764    }
765  
766  
767    template<PointerSize pointer_size>
768    static ThreadOffset<pointer_size> CardTableOffset() {
769      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
770    }
771  
772    template<PointerSize pointer_size>
773    static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
774      return ThreadOffsetFromTlsPtr<pointer_size>(
775          OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
776    }
777  
778    template<PointerSize pointer_size>
779    static ThreadOffset<pointer_size> ThreadLocalPosOffset() {
780      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
781                                                                  thread_local_pos));
782    }
783  
784    template<PointerSize pointer_size>
785    static ThreadOffset<pointer_size> ThreadLocalEndOffset() {
786      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
787                                                                  thread_local_end));
788    }
789  
790    template<PointerSize pointer_size>
791    static ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
792      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
793                                                                  thread_local_objects));
794    }
795  
796    template<PointerSize pointer_size>
797    static ThreadOffset<pointer_size> RosAllocRunsOffset() {
798      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
799                                                                  rosalloc_runs));
800    }
801  
802    template<PointerSize pointer_size>
803    static ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
804      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
805                                                                  thread_local_alloc_stack_top));
806    }
807  
808    template<PointerSize pointer_size>
809    static ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
810      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
811                                                                  thread_local_alloc_stack_end));
812    }
813  
814    // Size of stack less any space reserved for stack overflow
815    size_t GetStackSize() const {
816      return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
817    }
818  
819    uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const {
820      if (implicit_overflow_check) {
821        // The interpreter needs the extra overflow bytes that stack_end does
822        // not include.
823        return tlsPtr_.stack_end + GetStackOverflowReservedBytes(kRuntimeISA);
824      } else {
825        return tlsPtr_.stack_end;
826      }
827    }
828  
829    uint8_t* GetStackEnd() const {
830      return tlsPtr_.stack_end;
831    }
832  
833    // Sets the stack end to the value to be used while handling a stack overflow.
834    void SetStackEndForStackOverflow() REQUIRES_SHARED(Locks::mutator_lock_);
835  
836    // Resets the stack end to the value to be used during regular execution.
837    void ResetDefaultStackEnd() {
838      // Our stacks grow down, so we want stack_end_ to be near the lowest usable address, while
839      // reserving enough room to throw a StackOverflowError.
840      tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
841    }
842  
843    bool IsHandlingStackOverflow() const {
844      return tlsPtr_.stack_end == tlsPtr_.stack_begin;
845    }
846  
847    template<PointerSize pointer_size>
848    static ThreadOffset<pointer_size> StackEndOffset() {
849      return ThreadOffsetFromTlsPtr<pointer_size>(
850          OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
851    }
852  
853    template<PointerSize pointer_size>
854    static ThreadOffset<pointer_size> JniEnvOffset() {
855      return ThreadOffsetFromTlsPtr<pointer_size>(
856          OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
857    }
858  
859    template<PointerSize pointer_size>
860    static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
861      return ThreadOffsetFromTlsPtr<pointer_size>(
862          OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
863          ManagedStack::TaggedTopQuickFrameOffset());
864    }
865  
866    const ManagedStack* GetManagedStack() const {
867      return &tlsPtr_.managed_stack;
868    }
869  
870    // Linked list recording fragments of managed stack.
871    void PushManagedStackFragment(ManagedStack* fragment) {
872      tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
873    }
874    void PopManagedStackFragment(const ManagedStack& fragment) {
875      tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
876    }
877  
878    ALWAYS_INLINE ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame);
879    ALWAYS_INLINE ShadowFrame* PopShadowFrame();
880  
881    template<PointerSize pointer_size>
882    static ThreadOffset<pointer_size> TopShadowFrameOffset() {
883      return ThreadOffsetFromTlsPtr<pointer_size>(
884          OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
885          ManagedStack::TopShadowFrameOffset());
886    }
887  
888    // Is the given obj in this thread's stack indirect reference table?
889    bool HandleScopeContains(jobject obj) const;
890  
891    void HandleScopeVisitRoots(RootVisitor* visitor, pid_t thread_id)
892        REQUIRES_SHARED(Locks::mutator_lock_);
893  
894    BaseHandleScope* GetTopHandleScope() {
895      return tlsPtr_.top_handle_scope;
896    }
897  
898    void PushHandleScope(BaseHandleScope* handle_scope) {
899      DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
900      tlsPtr_.top_handle_scope = handle_scope;
901    }
902  
903    BaseHandleScope* PopHandleScope() {
904      BaseHandleScope* handle_scope = tlsPtr_.top_handle_scope;
905      DCHECK(handle_scope != nullptr);
906      tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
907      return handle_scope;
908    }
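  // Hedged usage sketch: handle scopes are normally pushed and popped via RAII helpers
  // from handle_scope.h (included above) rather than by calling the two methods above
  // directly, e.g.
  //
  //   // StackHandleScope<1> hs(self);
  //   // Handle<mirror::Class> h_klass = hs.NewHandle(klass);  // klass: some ObjPtr<mirror::Class>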
909  
910    template<PointerSize pointer_size>
911    static ThreadOffset<pointer_size> TopHandleScopeOffset() {
912      return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
913                                                                  top_handle_scope));
914    }
915  
916    DebugInvokeReq* GetInvokeReq() const {
917      return tlsPtr_.debug_invoke_req;
918    }
919  
920    SingleStepControl* GetSingleStepControl() const {
921      return tlsPtr_.single_step_control;
922    }
923  
924    // Indicates whether this thread is ready to invoke a method for debugging. This
925    // is only true if the thread has been suspended by a debug event.
926    bool IsReadyForDebugInvoke() const {
927      return tls32_.ready_for_debug_invoke;
928    }
929  
930    void SetReadyForDebugInvoke(bool ready) {
931      tls32_.ready_for_debug_invoke = ready;
932    }
933  
934    bool IsDebugMethodEntry() const {
935      return tls32_.debug_method_entry_;
936    }
937  
938    void SetDebugMethodEntry() {
939      tls32_.debug_method_entry_ = true;
940    }
941  
942    void ClearDebugMethodEntry() {
943      tls32_.debug_method_entry_ = false;
944    }
945  
946    bool GetIsGcMarking() const {
947      CHECK(kUseReadBarrier);
948      return tls32_.is_gc_marking;
949    }
950  
951    void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking);
952  
953    bool GetWeakRefAccessEnabled() const {
954      CHECK(kUseReadBarrier);
955      return tls32_.weak_ref_access_enabled;
956    }
957  
958    void SetWeakRefAccessEnabled(bool enabled) {
959      CHECK(kUseReadBarrier);
960      tls32_.weak_ref_access_enabled = enabled;
961    }
962  
963    uint32_t GetDisableThreadFlipCount() const {
964      CHECK(kUseReadBarrier);
965      return tls32_.disable_thread_flip_count;
966    }
967  
968    void IncrementDisableThreadFlipCount() {
969      CHECK(kUseReadBarrier);
970      ++tls32_.disable_thread_flip_count;
971    }
972  
973    void DecrementDisableThreadFlipCount() {
974      CHECK(kUseReadBarrier);
975      DCHECK_GT(tls32_.disable_thread_flip_count, 0U);
976      --tls32_.disable_thread_flip_count;
977    }
978  
979    // Returns true if the thread is allowed to call into java.
980    bool CanCallIntoJava() const {
981      return can_call_into_java_;
982    }
983  
984    void SetCanCallIntoJava(bool can_call_into_java) {
985      can_call_into_java_ = can_call_into_java;
986    }
987  
988    // Activates single step control for debugging. The thread takes
989    // ownership of the given SingleStepControl*. It is deleted by a call
990    // to DeactivateSingleStepControl or upon thread destruction.
991    void ActivateSingleStepControl(SingleStepControl* ssc);
992  
993    // Deactivates single step control for debugging.
994    void DeactivateSingleStepControl();
995  
996    // Sets debug invoke request for debugging. When the thread is resumed,
997    // it executes the method described by this request then sends the reply
998    // before suspending itself. The thread takes ownership of the given
999    // DebugInvokeReq*. It is deleted by a call to ClearDebugInvokeReq.
1000    void SetDebugInvokeReq(DebugInvokeReq* req);
1001  
1002    // Clears debug invoke request for debugging. When the thread completes
1003    // method invocation, it deletes its debug invoke request and suspends
1004    // itself.
1005    void ClearDebugInvokeReq();
1006  
1007    // Returns the fake exception used to activate deoptimization.
1008    static mirror::Throwable* GetDeoptimizationException() {
1009      // Note that the mirror::Throwable must be aligned to kObjectAlignment or else it cannot be
1010      // represented by ObjPtr.
1011      return reinterpret_cast<mirror::Throwable*>(0x100);
1012    }
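  // Presumably consumers recognize this sentinel by identity rather than by inspecting
  // it (illustrative sketch, not code from this header):
  //
  //   // if (exception == Thread::GetDeoptimizationException()) { /* deoptimize */ }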
1013  
1014    // Currently deoptimization invokes the verifier, which can trigger class loading
1015    // and execute Java code, so there might be nested deoptimizations happening.
1016    // We need to save the ongoing deoptimization shadow frames and return
1017    // values on stacks.
1018    // 'from_code' denotes whether the deoptimization was explicitly made from
1019    // compiled code.
1020    // 'method_type' contains info on whether deoptimization should advance
1021    // dex_pc.
1022    void PushDeoptimizationContext(const JValue& return_value,
1023                                   bool is_reference,
1024                                   ObjPtr<mirror::Throwable> exception,
1025                                   bool from_code,
1026                                   DeoptimizationMethodType method_type)
1027        REQUIRES_SHARED(Locks::mutator_lock_);
1028    void PopDeoptimizationContext(JValue* result,
1029                                  ObjPtr<mirror::Throwable>* exception,
1030                                  bool* from_code,
1031                                  DeoptimizationMethodType* method_type)
1032        REQUIRES_SHARED(Locks::mutator_lock_);
1033    void AssertHasDeoptimizationContext()
1034        REQUIRES_SHARED(Locks::mutator_lock_);
1035    void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
1036    ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present = true);
1037  
1038    // For debugger, find the shadow frame that corresponds to a frame id,
1039    // or return null if there is none.
1040    ShadowFrame* FindDebuggerShadowFrame(size_t frame_id)
1041        REQUIRES_SHARED(Locks::mutator_lock_);
1042    // For debugger, find the bool array that keeps track of the updated vreg set
1043    // for a frame id.
1044    bool* GetUpdatedVRegFlags(size_t frame_id) REQUIRES_SHARED(Locks::mutator_lock_);
1045    // For debugger, find the shadow frame that corresponds to a frame id. If
1046    // one doesn't exist yet, create one and track it in frame_id_to_shadow_frame.
1047    ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id,
1048                                                 uint32_t num_vregs,
1049                                                 ArtMethod* method,
1050                                                 uint32_t dex_pc)
1051        REQUIRES_SHARED(Locks::mutator_lock_);
1052  
1053    // Delete the entry that maps from frame_id to shadow_frame.
1054    void RemoveDebuggerShadowFrameMapping(size_t frame_id)
1055        REQUIRES_SHARED(Locks::mutator_lock_);
1056  
1057    std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
1058      return tlsPtr_.instrumentation_stack;
1059    }
1060  
1061    std::vector<ArtMethod*>* GetStackTraceSample() const {
1062      DCHECK(!IsAotCompiler());
1063      return tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
1064    }
1065  
1066    void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
1067      DCHECK(!IsAotCompiler());
1068      tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample = sample;
1069    }
1070  
1071    verifier::VerifierDeps* GetVerifierDeps() const {
1072      DCHECK(IsAotCompiler());
1073      return tlsPtr_.deps_or_stack_trace_sample.verifier_deps;
1074    }
1075  
1076    // It is the responsibility of the caller to make sure the verifier_deps
1077    // entry in the thread is cleared before destruction of the actual VerifierDeps
1078    // object, or the thread.
1079    void SetVerifierDeps(verifier::VerifierDeps* verifier_deps) {
1080      DCHECK(IsAotCompiler());
1081      DCHECK(verifier_deps == nullptr || tlsPtr_.deps_or_stack_trace_sample.verifier_deps == nullptr);
1082      tlsPtr_.deps_or_stack_trace_sample.verifier_deps = verifier_deps;
1083    }
1084  
1085    uint64_t GetTraceClockBase() const {
1086      return tls64_.trace_clock_base;
1087    }
1088  
1089    void SetTraceClockBase(uint64_t clock_base) {
1090      tls64_.trace_clock_base = clock_base;
1091    }
1092  
1093    BaseMutex* GetHeldMutex(LockLevel level) const {
1094      return tlsPtr_.held_mutexes[level];
1095    }
1096  
1097    void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
1098      tlsPtr_.held_mutexes[level] = mutex;
1099    }
1100  
1101    void ClearSuspendBarrier(AtomicInteger* target)
1102        REQUIRES(Locks::thread_suspend_count_lock_);
1103  
1104    bool ReadFlag(ThreadFlag flag) const {
1105      return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
1106    }
1107  
1108    bool TestAllFlags() const {
1109      return (tls32_.state_and_flags.as_struct.flags != 0);
1110    }
1111  
1112    void AtomicSetFlag(ThreadFlag flag) {
1113      tls32_.state_and_flags.as_atomic_int.FetchAndBitwiseOrSequentiallyConsistent(flag);
1114    }
1115  
1116    void AtomicClearFlag(ThreadFlag flag) {
1117      tls32_.state_and_flags.as_atomic_int.FetchAndBitwiseAndSequentiallyConsistent(-1 ^ flag);
1118    }
1119  
1120    void ResetQuickAllocEntryPointsForThread(bool is_marking);
1121  
1122    // Returns the remaining space in the TLAB.
1123    size_t TlabSize() const {
1124      return tlsPtr_.thread_local_end - tlsPtr_.thread_local_pos;
1125    }
1126  
1127    // Returns the remaining space in the TLAB if we were to expand it to maximum capacity.
1128    size_t TlabRemainingCapacity() const {
1129      return tlsPtr_.thread_local_limit - tlsPtr_.thread_local_pos;
1130    }
1131  
1132    // Expand the TLAB by a fixed number of bytes. There must be enough capacity to do so.
1133    void ExpandTlab(size_t bytes) {
1134      tlsPtr_.thread_local_end += bytes;
1135      DCHECK_LE(tlsPtr_.thread_local_end, tlsPtr_.thread_local_limit);
1136    }
1137  
1138    // Doesn't check that there is room.
1139    mirror::Object* AllocTlab(size_t bytes);
1140    void SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit);
1141    bool HasTlab() const;
1142    uint8_t* GetTlabStart() {
1143      return tlsPtr_.thread_local_start;
1144    }
1145    uint8_t* GetTlabPos() {
1146      return tlsPtr_.thread_local_pos;
1147    }
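  // Hedged sketch of the bump-pointer fast path these accessors support (the real fast
  // path lives in the allocator and in compiled code, not here):
  //
  //   // if (self->TlabSize() >= bytes) {
  //   //   mirror::Object* obj = self->AllocTlab(bytes);  // advances thread_local_pos
  //   // } else {
  //   //   ... slow path: refill the TLAB or allocate directly in the heap ...
  //   // }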
1148  
1149    // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
1150    // equal to a valid pointer.
1151    // TODO: does this need to be atomic? I don't think so.
1152    void RemoveSuspendTrigger() {
1153      tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
1154    }
1155  
1156    // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
1157    // The next time a suspend check is done, it will load from the value at this address
1158    // and trigger a SIGSEGV.
1159    void TriggerSuspend() {
1160      tlsPtr_.suspend_trigger = nullptr;
1161    }
1162  
1163  
1164    // Push an object onto the allocation stack.
1165    bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
1166        REQUIRES_SHARED(Locks::mutator_lock_);
1167  
1168    // Set the thread local allocation pointers to the given pointers.
1169    void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
1170                                       StackReference<mirror::Object>* end);
1171  
1172    // Resets the thread local allocation pointers.
1173    void RevokeThreadLocalAllocationStack();
1174  
1175    size_t GetThreadLocalBytesAllocated() const {
1176      return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
1177    }
1178  
1179    size_t GetThreadLocalObjectsAllocated() const {
1180      return tlsPtr_.thread_local_objects;
1181    }
1182  
1183    void* GetRosAllocRun(size_t index) const {
1184      return tlsPtr_.rosalloc_runs[index];
1185    }
1186  
1187    void SetRosAllocRun(size_t index, void* run) {
1188      tlsPtr_.rosalloc_runs[index] = run;
1189    }
1190  
1191    bool ProtectStack(bool fatal_on_error = true);
1192    bool UnprotectStack();
1193  
1194    void SetMterpDefaultIBase(void* ibase) {
1195      tlsPtr_.mterp_default_ibase = ibase;
1196    }
1197  
1198    void SetMterpCurrentIBase(void* ibase) {
1199      tlsPtr_.mterp_current_ibase = ibase;
1200    }
1201  
1202    void SetMterpAltIBase(void* ibase) {
1203      tlsPtr_.mterp_alt_ibase = ibase;
1204    }
1205  
1206    const void* GetMterpDefaultIBase() const {
1207      return tlsPtr_.mterp_default_ibase;
1208    }
1209  
1210    const void* GetMterpCurrentIBase() const {
1211      return tlsPtr_.mterp_current_ibase;
1212    }
1213  
1214    const void* GetMterpAltIBase() const {
1215      return tlsPtr_.mterp_alt_ibase;
1216    }
1217  
1218    bool HandlingSignal() const {
1219      return tls32_.handling_signal_;
1220    }
1221  
1222    void SetHandlingSignal(bool handling_signal) {
1223      tls32_.handling_signal_ = handling_signal;
1224    }
1225  
1226    bool IsTransitioningToRunnable() const {
1227      return tls32_.is_transitioning_to_runnable;
1228    }
1229  
1230    void SetIsTransitioningToRunnable(bool value) {
1231      tls32_.is_transitioning_to_runnable = value;
1232    }
1233  
1234    void PushVerifier(verifier::MethodVerifier* verifier);
1235    void PopVerifier(verifier::MethodVerifier* verifier);
1236  
1237    void InitStringEntryPoints();
1238  
1239    void ModifyDebugDisallowReadBarrier(int8_t delta) {
1240      debug_disallow_read_barrier_ += delta;
1241    }
1242  
1243    uint8_t GetDebugDisallowReadBarrierCount() const {
1244      return debug_disallow_read_barrier_;
1245    }
1246  
1247    void* GetCustomTLS() const REQUIRES(Locks::thread_list_lock_) {
1248      return custom_tls_;
1249    }
1250  
1251    void SetCustomTLS(void* data) REQUIRES(Locks::thread_list_lock_) {
1252      custom_tls_ = data;
1253    }
1254  
1255    // Returns true if the current thread is the jit sensitive thread.
1256    bool IsJitSensitiveThread() const {
1257      return this == jit_sensitive_thread_;
1258    }
1259  
1260    // Returns true if StrictMode events are traced for the current thread.
1261    static bool IsSensitiveThread() {
1262      if (is_sensitive_thread_hook_ != nullptr) {
1263        return (*is_sensitive_thread_hook_)();
1264      }
1265      return false;
1266    }
1267  
1268    // Sets the read barrier marking entrypoints to be non-null.
1269    void SetReadBarrierEntrypoints();
1270  
1271    static jobject CreateCompileTimePeer(JNIEnv* env,
1272                                         const char* name,
1273                                         bool as_daemon,
1274                                         jobject thread_group)
1275        REQUIRES_SHARED(Locks::mutator_lock_);
1276  
1277   private:
1278    explicit Thread(bool daemon);
1279    ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
1280    void Destroy();
1281  
1282    // Attaches the calling native thread to the runtime, returning the new native peer.
1283    // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
1284    template <typename PeerAction>
1285    static Thread* Attach(const char* thread_name,
1286                          bool as_daemon,
1287                          PeerAction p);
1288  
1289    void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
1290  
1291    template<bool kTransactionActive>
1292    static void InitPeer(ScopedObjectAccessAlreadyRunnable& soa,
1293                         ObjPtr<mirror::Object> peer,
1294                         jboolean thread_is_daemon,
1295                         jobject thread_group,
1296                         jobject thread_name,
1297                         jint thread_priority)
1298        REQUIRES_SHARED(Locks::mutator_lock_);
1299  
1300    // Avoid use; callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
1301    // Dbg::ManageDeoptimization.
1302    ThreadState SetStateUnsafe(ThreadState new_state) {
1303      ThreadState old_state = GetState();
1304      if (old_state == kRunnable && new_state != kRunnable) {
1305        // Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in
1306        // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
1307        // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
1308        TransitionToSuspendedAndRunCheckpoints(new_state);
1309      // Since we transitioned to a suspended state, check for pending suspend barriers and pass them.
1310        PassActiveSuspendBarriers();
1311      } else {
1312        tls32_.state_and_flags.as_struct.state = new_state;
1313      }
1314      return old_state;
1315    }
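  // Illustrative sketch only (not part of this class): the restricted call pattern described
  // above, loosely modeled on SignalCatcher-style use; the surrounding names and the CHECK are
  // assumptions, not part of this header:
  //
  //   ThreadState old_state = self->SetStateUnsafe(kRunnable);  // bypass the usual transition path
  //   // ... inspect or dump state while runnable ...
  //   CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);     // restore the previous state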
1316  
1317    void VerifyStackImpl() REQUIRES_SHARED(Locks::mutator_lock_);
1318  
1319    void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
1320    void DumpStack(std::ostream& os,
1321                   bool dump_native_stack = true,
1322                   BacktraceMap* backtrace_map = nullptr,
1323                   bool force_dump_stack = false) const
1324        REQUIRES(!Locks::thread_suspend_count_lock_)
1325        REQUIRES_SHARED(Locks::mutator_lock_);
1326  
1327    // Out-of-line conveniences for debugging in gdb.
1328    static Thread* CurrentFromGdb();  // Like Thread::Current.
1329    // Like Thread::Dump(std::cerr).
1330    void DumpFromGdb() const REQUIRES_SHARED(Locks::mutator_lock_);
1331  
1332    static void* CreateCallback(void* arg);
1333  
1334    void HandleUncaughtExceptions(ScopedObjectAccessAlreadyRunnable& soa)
1335        REQUIRES_SHARED(Locks::mutator_lock_);
1336    void RemoveFromThreadGroup(ScopedObjectAccessAlreadyRunnable& soa)
1337        REQUIRES_SHARED(Locks::mutator_lock_);
1338  
1339    // Initialize a thread.
1340    //
1341    // The third parameter is optional. If given, the thread will use this JNIEnvExt; if Init
1342    // succeeds, the thread takes ownership of it. If Init fails, it is the caller's
1343    // responsibility to destroy the given JNIEnvExt. If the parameter is null, Init will try to
1344    // create a JNIEnvExt on its own (and may fail at that stage, indicated by a return value
1345    // of false).
1346    bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
1347        REQUIRES(Locks::runtime_shutdown_lock_);
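  // Illustrative sketch only of the ownership contract described above; 'child', 'thread_list',
  // 'java_vm' and the JNIEnvExt creation call are assumptions, not part of this header:
  //
  //   JNIEnvExt* env = JNIEnvExt::Create(child, java_vm, &error_msg);  // pre-allocated by caller
  //   if (child->Init(thread_list, java_vm, env)) {
  //     // Success: 'child' now owns 'env' and destroys it when the thread shuts down.
  //   } else {
  //     delete env;  // Failure: ownership stays with the caller.
  //   }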
1348    void InitCardTable();
1349    void InitCpu();
1350    void CleanupCpu();
1351    void InitTlsEntryPoints();
1352    void InitTid();
1353    void InitPthreadKeySelf();
1354    bool InitStackHwm();
1355  
1356    void SetUpAlternateSignalStack();
1357    void TearDownAlternateSignalStack();
1358  
1359    ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state)
1360        REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1361  
1362    ALWAYS_INLINE void PassActiveSuspendBarriers()
1363        REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1364  
1365    // Registers the current thread as the jit sensitive thread. Should be called just once.
1366    static void SetJitSensitiveThread() {
1367      if (jit_sensitive_thread_ == nullptr) {
1368        jit_sensitive_thread_ = Thread::Current();
1369      } else {
1370        LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:"
1371            << Thread::Current()->GetTid();
1372      }
1373    }
1374  
1375    static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
1376      is_sensitive_thread_hook_ = is_sensitive_thread_hook;
1377    }
1378  
1379    bool ModifySuspendCountInternal(Thread* self,
1380                                    int delta,
1381                                    AtomicInteger* suspend_barrier,
1382                                    SuspendReason reason)
1383        WARN_UNUSED
1384        REQUIRES(Locks::thread_suspend_count_lock_);
1385  
1386    // Runs a single checkpoint function. If there are no more pending checkpoint functions it will
1387    // clear the kCheckpointRequest flag. The caller is responsible for calling this in a loop until
1388    // the kCheckpointRequest flag is cleared.
1389    void RunCheckpointFunction();
1390    void RunEmptyCheckpoint();
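  // Illustrative sketch only of the caller-side loop described above; polling via ReadFlag is an
  // assumption about how a caller observes kCheckpointRequest:
  //
  //   while (self->ReadFlag(kCheckpointRequest)) {
  //     self->RunCheckpointFunction();  // clears kCheckpointRequest once nothing is pending
  //   }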
1391  
1392    bool PassActiveSuspendBarriers(Thread* self)
1393        REQUIRES(!Locks::thread_suspend_count_lock_);
1394  
1395    // Install the protected region for implicit stack checks.
1396    void InstallImplicitProtection();
1397  
1398    template <bool kPrecise>
1399    void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
1400  
1401    static bool IsAotCompiler();
1402  
1403    // 32 bits of atomically changed state and flags. Keeping as 32 bits allows an atomic CAS to
1404    // change from being Suspended to Runnable without a suspend request occurring.
1405    union PACKED(4) StateAndFlags {
1406      StateAndFlags() {}
1407      struct PACKED(4) {
1408        // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
1409        // ThreadFlags for bit field meanings.
1410        volatile uint16_t flags;
1411        // Holds the ThreadState. May be changed non-atomically between Suspended (i.e. not Runnable)
1412        // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
1413        // operation. If a thread is suspended and a suspend_request is present, a thread may not
1414        // change to Runnable as a GC or other operation is in progress.
1415        volatile uint16_t state;
1416      } as_struct;
1417      AtomicInteger as_atomic_int;
1418      volatile int32_t as_int;
1419  
1420     private:
1421      // gcc does not handle structs with volatile member assignments correctly.
1422      // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
1423      DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
1424    };
1425    static_assert(sizeof(StateAndFlags) == sizeof(int32_t), "Weird state_and_flags size");
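  // Illustrative sketch only of the Suspended -> Runnable transition referred to above; the
  // compare-and-set helper name and memory order are assumptions:
  //
  //   union StateAndFlags old_sf, new_sf;
  //   old_sf.as_int = tls32_.state_and_flags.as_int;  // snapshot of state + flags
  //   if (old_sf.as_struct.flags == 0) {              // no suspend request pending
  //     new_sf.as_int = old_sf.as_int;
  //     new_sf.as_struct.state = kRunnable;
  //     tls32_.state_and_flags.as_atomic_int.CompareAndSetWeakAcquire(old_sf.as_int, new_sf.as_int);
  //   }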
1426  
1427    static void ThreadExitCallback(void* arg);
1428  
1429    // Maximum number of suspend barriers.
1430    static constexpr uint32_t kMaxSuspendBarriers = 3;
1431  
1432    // Has Thread::Startup been called?
1433    static bool is_started_;
1434  
1435    // TLS key used to retrieve the Thread*.
1436    static pthread_key_t pthread_key_self_;
1437  
1438    // Used to notify threads that they should attempt to resume; they will suspend again if
1439    // their suspend count is > 0.
1440    static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);
1441  
1442    // Hook passed by the framework which returns true
1443    // when StrictMode events are traced for the current thread.
1444    static bool (*is_sensitive_thread_hook_)();
1445    // Stores the jit sensitive thread (which for now is the UI thread).
1446    static Thread* jit_sensitive_thread_;
1447  
1448    /***********************************************************************************************/
1449    // Thread local storage. Fields are grouped by size so that offsets in the 32-bit and 64-bit
1450    // layouts can be related despite pointer size differences. To encourage shorter encoding, more
1451    // frequently used values appear first if possible.
1452    /***********************************************************************************************/
1453  
1454    struct PACKED(4) tls_32bit_sized_values {
1455      // We have no control over the size of 'bool', but want our boolean fields
1456      // to be 4-byte quantities.
1457      typedef uint32_t bool32_t;
1458  
1459      explicit tls_32bit_sized_values(bool is_daemon) :
1460        suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
1461        daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
1462        thread_exit_check_count(0), handling_signal_(false),
1463        is_transitioning_to_runnable(false), ready_for_debug_invoke(false),
1464        debug_method_entry_(false), is_gc_marking(false), weak_ref_access_enabled(true),
1465        disable_thread_flip_count(0), user_code_suspend_count(0) {
1466      }
1467  
1468      union StateAndFlags state_and_flags;
1469      static_assert(sizeof(union StateAndFlags) == sizeof(int32_t),
1470                    "Size of state_and_flags and int32 are different");
1471  
1472      // A non-zero value is used to tell the current thread to enter a safe point
1473      // at the next poll.
1474      int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1475  
1476      // How much of 'suspend_count_' is by request of the debugger, used to set things right
1477      // when the debugger detaches. Must be <= suspend_count_.
1478      int debug_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1479  
1480      // Thin lock thread id. This is a small integer used by the thin lock implementation.
1481      // This is not to be confused with the native thread's tid, nor is it the value returned
1482      // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
1483      // important difference between this id and the ids visible to managed code is that these
1484      // ones get reused (to ensure that they fit in the number of bits available).
1485      uint32_t thin_lock_thread_id;
1486  
1487      // System thread id.
1488      uint32_t tid;
1489  
1490      // Is the thread a daemon?
1491      const bool32_t daemon;
1492  
1493      // A boolean telling us whether we're recursively throwing OOME.
1494      bool32_t throwing_OutOfMemoryError;
1495  
1496      // A positive value implies we're in a region where thread suspension isn't expected.
1497      uint32_t no_thread_suspension;
1498  
1499      // How many times has our pthread key's destructor been called?
1500      uint32_t thread_exit_check_count;
1501  
1502      // True if signal is being handled by this thread.
1503      bool32_t handling_signal_;
1504  
1505      // True if the thread is in TransitionFromSuspendedToRunnable(). This is used to distinguish the
1506      // non-runnable threads (e.g. kNative, kWaiting) that are about to transition to runnable from
1507      // the rest of them.
1508      bool32_t is_transitioning_to_runnable;
1509  
1510      // True if the thread has been suspended by a debugger event. This is
1511      // used to invoke method from the debugger which is only allowed when
1512      // the thread is suspended by an event.
1513      bool32_t ready_for_debug_invoke;
1514  
1515      // True if the thread is entering a method. This is used to detect the method entry
1516      // event for the debugger.
1517      bool32_t debug_method_entry_;
1518  
1519      // True if the GC is in the marking phase. This is used for the CC collector only. This is
1520      // thread local so that we can simplify the logic to check for the fast path of read barriers of
1521      // GC roots.
1522      bool32_t is_gc_marking;
1523  
1524      // Thread "interrupted" status; stays raised until queried or thrown.
1525      Atomic<bool32_t> interrupted;
1526  
1527      // True if the thread is allowed to access a weak ref (Reference::GetReferent() and system
1528      // weaks) and to potentially mark an object alive/gray. This is used for concurrent reference
1529      // processing of the CC collector only. This is thread local so that we can enable/disable weak
1530      // ref access by using a checkpoint and avoid a race around the time weak ref access gets
1531      // disabled and concurrent reference processing begins (if weak ref access is disabled during a
1532      // pause, this is not an issue.) Other collectors use Runtime::DisallowNewSystemWeaks() and
1533      // ReferenceProcessor::EnableSlowPath().
1534      bool32_t weak_ref_access_enabled;
1535  
1536      // A thread local version of Heap::disable_thread_flip_count_. This keeps track of how many
1537      // levels of (nested) JNI critical sections the thread is in and is used to detect a nested JNI
1538      // critical section enter.
1539      uint32_t disable_thread_flip_count;
1540  
1541      // How much of 'suspend_count_' is by request of user code, used to distinguish threads
1542      // suspended by the runtime from those suspended by user code.
1543      // This should have GUARDED_BY(Locks::user_code_suspension_lock_) but auto analysis cannot be
1544      // told that AssertHeld should be good enough.
1545      int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1546    } tls32_;
1547  
1548    struct PACKED(8) tls_64bit_sized_values {
1549      tls_64bit_sized_values() : trace_clock_base(0) {
1550      }
1551  
1552      // The clock base used for tracing.
1553      uint64_t trace_clock_base;
1554  
1555      RuntimeStats stats;
1556    } tls64_;
1557  
1558    struct PACKED(sizeof(void*)) tls_ptr_sized_values {
1559        tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
1560        managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), tmp_jni_env(nullptr),
1561        self(nullptr), opeer(nullptr), jpeer(nullptr), stack_begin(nullptr), stack_size(0),
1562        deps_or_stack_trace_sample(), wait_next(nullptr), monitor_enter_object(nullptr),
1563        top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
1564        instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
1565        stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr),
1566        frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
1567        last_no_thread_suspension_cause(nullptr), checkpoint_function(nullptr),
1568        thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
1569        thread_local_limit(nullptr),
1570        thread_local_objects(0), mterp_current_ibase(nullptr), mterp_default_ibase(nullptr),
1571        mterp_alt_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
1572        thread_local_alloc_stack_end(nullptr),
1573        flip_function(nullptr), method_verifier(nullptr), thread_local_mark_stack(nullptr),
1574        async_exception(nullptr) {
1575        std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
1576      }
1577  
1578      // The biased card table; see CardTable for details.
1579      uint8_t* card_table;
1580  
1581      // The pending exception or null.
1582      mirror::Throwable* exception;
1583  
1584      // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
1585      // We leave extra space so there's room for the code that throws StackOverflowError.
1586      uint8_t* stack_end;
1587  
1588      // The top of the managed stack, often manipulated directly by compiler generated code.
1589      ManagedStack managed_stack;
1590  
1591      // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check.  It is
1592      // normally set to the address of itself.
1593      uintptr_t* suspend_trigger;
1594  
1595      // Every thread may have an associated JNI environment.
1596      JNIEnvExt* jni_env;
1597  
1598      // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the
1599      // created thread.
1600      JNIEnvExt* tmp_jni_env;
1601  
1602      // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
1603      // is easy but getting the address of Thread::Current is hard. This field can be read off of
1604      // Thread::Current to give the address.
1605      Thread* self;
1606  
1607      // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
1608      // start up, until the thread is registered and the local opeer_ is used.
1609      mirror::Object* opeer;
1610      jobject jpeer;
1611  
1612      // The "lowest addressable byte" of the stack.
1613      uint8_t* stack_begin;
1614  
1615      // Size of the stack.
1616      size_t stack_size;
1617  
1618      // Sampling profiler and AOT verification cannot happen on the same run, so we share
1619      // the same entry for the stack trace and the verifier deps.
1620      union DepsOrStackTraceSample {
1621        DepsOrStackTraceSample() {
1622          verifier_deps = nullptr;
1623          stack_trace_sample = nullptr;
1624        }
1625        // Pointer to previous stack trace captured by sampling profiler.
1626        std::vector<ArtMethod*>* stack_trace_sample;
1627        // When doing AOT verification, per-thread VerifierDeps.
1628        verifier::VerifierDeps* verifier_deps;
1629      } deps_or_stack_trace_sample;
1630  
1631      // The next thread in the wait set this thread is part of or null if not waiting.
1632      Thread* wait_next;
1633  
1634      // If we're blocked in MonitorEnter, this is the object we're trying to lock.
1635      mirror::Object* monitor_enter_object;
1636  
1637      // Top of linked list of handle scopes or null for none.
1638      BaseHandleScope* top_handle_scope;
1639  
1640      // Needed to get the right ClassLoader in JNI_OnLoad, but also
1641      // useful for testing.
1642      jobject class_loader_override;
1643  
1644      // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
1645      Context* long_jump_context;
1646  
1647      // Additional stack used by method instrumentation to store method and return pc values.
1648      // Stored as a pointer since std::deque is not PACKED.
1649      std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack;
1650  
1651      // JDWP invoke-during-breakpoint support.
1652      DebugInvokeReq* debug_invoke_req;
1653  
1654      // JDWP single-stepping support.
1655      SingleStepControl* single_step_control;
1656  
1657      // For gc purpose, a shadow frame record stack that keeps track of:
1658      // 1) shadow frames under construction.
1659      // 2) deoptimization shadow frames.
1660      StackedShadowFrameRecord* stacked_shadow_frame_record;
1661  
1662      // Deoptimization return value record stack.
1663      DeoptimizationContextRecord* deoptimization_context_stack;
1664  
1665      // For debugger, a linked list that keeps the mapping from frame_id to shadow frame.
1666      // Shadow frames may be created before deoptimization happens so that the debugger can
1667      // set local values there first.
1668      FrameIdToShadowFrame* frame_id_to_shadow_frame;
1669  
1670      // A cached copy of the java.lang.Thread's name.
1671      std::string* name;
1672  
1673      // A cached pthread_t for the pthread underlying this Thread*.
1674      pthread_t pthread_self;
1675  
1676      // If no_thread_suspension_ is > 0, what is causing that assertion.
1677      const char* last_no_thread_suspension_cause;
1678  
1679      // Pending checkpoint function or null if non-pending. If this checkpoint is set and someone
1680      // requests another checkpoint, it goes to the checkpoint overflow list.
1681      Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);
1682  
1683      // Pending barriers that require passing, or null if none are pending. Installation is guarded
1684      // by Locks::thread_suspend_count_lock_.
1685      // They work effectively as art::Barrier, but are implemented directly using AtomicInteger and
1686      // futex to avoid the additional cost of a mutex and a condition variable, as used in art::Barrier.
1687      AtomicInteger* active_suspend_barriers[kMaxSuspendBarriers];
1688  
1689      // Thread-local allocation pointer. Moved here to force alignment for thread_local_pos on ARM.
1690      uint8_t* thread_local_start;
1691  
1692      // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8-byte aligned for
1693      // potentially better performance.
1694      uint8_t* thread_local_pos;
1695      uint8_t* thread_local_end;
1696  
1697      // Thread local limit is how far the thread-local buffer can be expanded; it is greater than or
1698      // equal to thread_local_end.
1699      uint8_t* thread_local_limit;
1700  
1701      size_t thread_local_objects;
1702  
1703      // Entrypoint function pointers.
1704      // TODO: move this to more of a global offset table model to avoid per-thread duplication.
1705      JniEntryPoints jni_entrypoints;
1706      QuickEntryPoints quick_entrypoints;
1707  
1708      // Mterp jump table bases.
1709      void* mterp_current_ibase;
1710      void* mterp_default_ibase;
1711      void* mterp_alt_ibase;
1712  
1713      // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
1714      void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];
1715  
1716      // Thread-local allocation stack data/routines.
1717      StackReference<mirror::Object>* thread_local_alloc_stack_top;
1718      StackReference<mirror::Object>* thread_local_alloc_stack_end;
1719  
1720      // Support for Mutex lock hierarchy bug detection.
1721      BaseMutex* held_mutexes[kLockLevelCount];
1722  
1723      // The function used for thread flip.
1724      Closure* flip_function;
1725  
1726      // Current method verifier, used for root marking.
1727      verifier::MethodVerifier* method_verifier;
1728  
1729      // Thread-local mark stack for the concurrent copying collector.
1730      gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;
1731  
1732      // The pending async-exception or null.
1733      mirror::Throwable* async_exception;
1734    } tlsPtr_;
1735  
1736    // Guards the 'wait_cond_' and 'wait_monitor_' members.
1737    Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1738  
1739    // Condition variable waited upon during a wait.
1740    ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
1741    // Pointer to the monitor lock we're currently waiting on or null if not waiting.
1742    Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
1743  
1744    // Debug disable read barrier count; it is only checked in debug builds and only in the runtime.
1745    uint8_t debug_disallow_read_barrier_ = 0;
1746  
1747    // Note that it is not in the packed struct, so it may not be accessed during cross compilation.
1748    uintptr_t poison_object_cookie_ = 0;
1749  
1750    // Pending extra checkpoints if checkpoint_function_ is already used.
1751    std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);
1752  
1753    // Custom TLS field that can be used by plugins.
1754    // TODO: Generalize once we have more plugins.
1755    void* custom_tls_;
1756  
1757    // True if the thread is allowed to call back into java (e.g. during class resolution).
1758    // By default this is true.
1759    bool can_call_into_java_;
1760  
1761    friend class Dbg;  // For SetStateUnsafe.
1762    friend class gc::collector::SemiSpace;  // For getting stack traces.
1763    friend class Runtime;  // For CreatePeer.
1764    friend class QuickExceptionHandler;  // For dumping the stack.
1765    friend class ScopedThreadStateChange;
1766    friend class StubTest;  // For accessing entrypoints.
1767    friend class ThreadList;  // For ~Thread and Destroy.
1768  
1769    friend class EntrypointsOrderTest;  // To test the order of tls entries.
1770  
1771    DISALLOW_COPY_AND_ASSIGN(Thread);
1772  };
1773  
1774  class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
1775   public:
1776    ALWAYS_INLINE ScopedAssertNoThreadSuspension(const char* cause,
1777                                                 bool enabled = true)
1778        ACQUIRE(Roles::uninterruptible_)
1779        : enabled_(enabled) {
1780      if (!enabled_) {
1781        return;
1782      }
1783      if (kIsDebugBuild) {
1784        self_ = Thread::Current();
1785        old_cause_ = self_->StartAssertNoThreadSuspension(cause);
1786      } else {
1787        Roles::uninterruptible_.Acquire();  // No-op.
1788      }
1789    }
1790    ALWAYS_INLINE ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) {
1791      if (!enabled_) {
1792        return;
1793      }
1794      if (kIsDebugBuild) {
1795        self_->EndAssertNoThreadSuspension(old_cause_);
1796      } else {
1797        Roles::uninterruptible_.Release();  // No-op.
1798      }
1799    }
1800  
1801   private:
1802    Thread* self_;
1803    const bool enabled_;
1804    const char* old_cause_;
1805  };
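// Illustrative usage sketch only; the surrounding function and its work are hypothetical:
//
//   void FlushBuffers() {
//     ScopedAssertNoThreadSuspension sants("Flushing buffers");
//     // ... work that must not suspend the thread ...
//   }  // debug assertion (or no-op role release) ends here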
1806  
1807  class ScopedStackedShadowFramePusher {
1808   public:
1809    ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf, StackedShadowFrameType type)
1810      : self_(self), type_(type) {
1811      self_->PushStackedShadowFrame(sf, type);
1812    }
1813    ~ScopedStackedShadowFramePusher() {
1814      self_->PopStackedShadowFrame(type_);
1815    }
1816  
1817   private:
1818    Thread* const self_;
1819    const StackedShadowFrameType type_;
1820  
1821    DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
1822  };
1823  
1824  // Only works for debug builds.
1825  class ScopedDebugDisallowReadBarriers {
1826   public:
1827    explicit ScopedDebugDisallowReadBarriers(Thread* self) : self_(self) {
1828      self_->ModifyDebugDisallowReadBarrier(1);
1829    }
1830    ~ScopedDebugDisallowReadBarriers() {
1831      self_->ModifyDebugDisallowReadBarrier(-1);
1832    }
1833  
1834   private:
1835    Thread* const self_;
1836  };
1837  
1838  class ScopedTransitioningToRunnable : public ValueObject {
1839   public:
1840    explicit ScopedTransitioningToRunnable(Thread* self)
1841        : self_(self) {
1842      DCHECK_EQ(self, Thread::Current());
1843      if (kUseReadBarrier) {
1844        self_->SetIsTransitioningToRunnable(true);
1845      }
1846    }
1847  
1848    ~ScopedTransitioningToRunnable() {
1849      if (kUseReadBarrier) {
1850        self_->SetIsTransitioningToRunnable(false);
1851      }
1852    }
1853  
1854   private:
1855    Thread* const self_;
1856  };
1857  
1858  class ThreadLifecycleCallback {
1859   public:
1860    virtual ~ThreadLifecycleCallback() {}
1861  
1862    virtual void ThreadStart(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
1863    virtual void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
1864  };
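// Illustrative sketch only of implementing the callback interface above; the class and its logging
// are hypothetical, and the lock annotations on the overrides are omitted for brevity:
//
//   class LoggingThreadCallback : public ThreadLifecycleCallback {
//    public:
//     void ThreadStart(Thread* self) override { LOG(INFO) << "thread started: " << *self; }
//     void ThreadDeath(Thread* self) override { LOG(INFO) << "thread exiting: " << *self; }
//   };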
1865  
1866  std::ostream& operator<<(std::ostream& os, const Thread& thread);
1867  std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& type);
1868  
1869  }  // namespace art
1870  
1871  #endif  // ART_RUNTIME_THREAD_H_
1872