1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_RUNTIME_THREAD_H_
18 #define ART_RUNTIME_THREAD_H_
19 
20 #include <bitset>
21 #include <deque>
22 #include <iosfwd>
23 #include <list>
24 #include <memory>
25 #include <setjmp.h>
26 #include <string>
27 
28 #include "arch/context.h"
29 #include "arch/instruction_set.h"
30 #include "atomic.h"
31 #include "base/enums.h"
32 #include "base/macros.h"
33 #include "base/mutex.h"
34 #include "entrypoints/jni/jni_entrypoints.h"
35 #include "entrypoints/quick/quick_entrypoints.h"
36 #include "gc_root.h"
37 #include "globals.h"
38 #include "handle_scope.h"
39 #include "instrumentation.h"
40 #include "jvalue.h"
41 #include "object_callbacks.h"
42 #include "offsets.h"
43 #include "runtime_stats.h"
44 #include "stack.h"
45 #include "thread_state.h"
46 
47 class BacktraceMap;
48 
49 namespace art {
50 
51 namespace gc {
52 namespace accounting {
53   template<class T> class AtomicStack;
54 }  // namespace accounting
55 namespace collector {
56   class SemiSpace;
57 }  // namespace collector
58 }  // namespace gc
59 
60 namespace mirror {
61   class Array;
62   class Class;
63   class ClassLoader;
64   class Object;
65   template<class T> class ObjectArray;
66   template<class T> class PrimitiveArray;
67   typedef PrimitiveArray<int32_t> IntArray;
68   class StackTraceElement;
69   class String;
70   class Throwable;
71 }  // namespace mirror
72 
73 namespace verifier {
74   class MethodVerifier;
75   class VerifierDeps;
76 }  // namespace verifier
77 
78 class ArtMethod;
79 class BaseMutex;
80 class ClassLinker;
81 class Closure;
82 class Context;
83 struct DebugInvokeReq;
84 class DeoptimizationContextRecord;
85 class DexFile;
86 class FrameIdToShadowFrame;
87 class JavaVMExt;
88 struct JNIEnvExt;
89 class Monitor;
90 class ScopedObjectAccessAlreadyRunnable;
91 class ShadowFrame;
92 class SingleStepControl;
93 class StackedShadowFrameRecord;
94 class Thread;
95 class ThreadList;
96 
97 // Thread priorities. These must match the Thread.MIN_PRIORITY,
98 // Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
99 enum ThreadPriority {
100   kMinThreadPriority = 1,
101   kNormThreadPriority = 5,
102   kMaxThreadPriority = 10,
103 };
104 
105 enum ThreadFlag {
106   kSuspendRequest   = 1,  // If set, implies that suspend_count_ > 0 and the Thread should enter
107                           // the safepoint handler.
108   kCheckpointRequest = 2,  // Request that the thread do some checkpoint work and then continue.
109   kEmptyCheckpointRequest = 4,  // Request that the thread do empty checkpoint and then continue.
110   kActiveSuspendBarrier = 8,  // Register that at least 1 suspend barrier needs to be passed.
111 };
112 
113 enum class StackedShadowFrameType {
114   kShadowFrameUnderConstruction,
115   kDeoptimizationShadowFrame,
116 };
117 
118 // This should match RosAlloc::kNumThreadLocalSizeBrackets.
119 static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
120 
121 // Thread's stack layout for implicit stack overflow checks:
122 //
123 //   +---------------------+  <- highest address of stack memory
124 //   |                     |
125 //   .                     .  <- SP
126 //   |                     |
127 //   |                     |
128 //   +---------------------+  <- stack_end
129 //   |                     |
130 //   |  Gap                |
131 //   |                     |
132 //   +---------------------+  <- stack_begin
133 //   |                     |
134 //   | Protected region    |
135 //   |                     |
136 //   +---------------------+  <- lowest address of stack memory
137 //
138 // The stack always grows down in memory.  At the lowest address is a region of memory
139 // that is set mprotect(PROT_NONE).  Any attempt to read/write to this region will
140 // result in a segmentation fault signal.  At any point, the thread's SP will be somewhere
141 // between the stack_end and the highest address in stack memory.  An implicit stack
142 // overflow check is a read of memory at a certain offset below the current SP (4K typically).
143 // If the thread's SP is below the stack_end address this will be a read into the protected
144 // region.  If the SP is above the stack_end address, the thread is guaranteed to have
145 // at least 4K of space.  Because stack overflow checks are only performed in generated code,
146 // if the thread makes a call out to a native function (through JNI), that native function
147 // might only have 4K of memory (if the SP is adjacent to stack_end).
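//
// Illustrative sketch only (assumption: a hypothetical GetCurrentStackPointer() helper; the real
// check is a single load emitted by the compiler). The implicit check described above amounts to
// probing a fixed distance below SP and letting the protected region fault:
//
//   uint8_t* sp = GetCurrentStackPointer();                        // hypothetical helper
//   volatile uint8_t probe = *(sp - kStackOverflowImplicitCheckSize);
//   (void) probe;  // faults (SIGSEGV) only if SP has dropped below stack_end, because the read
//                  // then lands in the protected region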
148 
149 class Thread {
150  public:
151   static const size_t kStackOverflowImplicitCheckSize;
152 
153   // Creates a new native thread corresponding to the given managed peer.
154   // Used to implement Thread.start.
155   static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
156 
157   // Attaches the calling native thread to the runtime, returning the new native peer.
158   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
159   static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
160                         bool create_peer);
161   // Attaches the calling native thread to the runtime, returning the new native peer.
162   static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_peer);
163 
164   // Reset internal state of child thread after fork.
165   void InitAfterFork();
166 
167   // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
168   // high cost and so we favor passing self around when possible.
169   // TODO: mark as PURE so the compiler may coalesce and remove?
170   static Thread* Current();
171 
172   // On a runnable thread, check for pending thread suspension request and handle if pending.
173   void AllowThreadSuspension() REQUIRES_SHARED(Locks::mutator_lock_);
174 
175   // Process pending thread suspension request and handle if pending.
176   void CheckSuspend() REQUIRES_SHARED(Locks::mutator_lock_);
177 
178   // Process a pending empty checkpoint if pending.
179   void CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex);
180   void CheckEmptyCheckpointFromMutex();
181 
182   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
183                                    ObjPtr<mirror::Object> thread_peer)
184       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
185       REQUIRES_SHARED(Locks::mutator_lock_);
186   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
187       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
188       REQUIRES_SHARED(Locks::mutator_lock_);
189 
190   // Translates 172 to pAllocArrayFromCode and so on.
191   template<PointerSize size_of_pointers>
192   static void DumpThreadOffset(std::ostream& os, uint32_t offset);
193 
194   // Dumps a one-line summary of thread state (used for operator<<).
195   void ShortDump(std::ostream& os) const;
196 
197   // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
198   void Dump(std::ostream& os,
199             bool dump_native_stack = true,
200             BacktraceMap* backtrace_map = nullptr,
201             bool force_dump_stack = false) const
202       REQUIRES(!Locks::thread_suspend_count_lock_)
203       REQUIRES_SHARED(Locks::mutator_lock_);
204 
205   void DumpJavaStack(std::ostream& os,
206                      bool check_suspended = true,
207                      bool dump_locks = true) const
208       REQUIRES(!Locks::thread_suspend_count_lock_)
209       REQUIRES_SHARED(Locks::mutator_lock_);
210 
211   // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
212   // case we use 'tid' to identify the thread, and we'll include as much information as we can.
213   static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
214       REQUIRES(!Locks::thread_suspend_count_lock_)
215       REQUIRES_SHARED(Locks::mutator_lock_);
216 
217   ThreadState GetState() const {
218     DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
219     DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
220     return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
221   }
222 
223   ThreadState SetState(ThreadState new_state);
224 
225   int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
226     return tls32_.suspend_count;
227   }
228 
229   int GetDebugSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
230     return tls32_.debug_suspend_count;
231   }
232 
233   bool IsSuspended() const {
234     union StateAndFlags state_and_flags;
235     state_and_flags.as_int = tls32_.state_and_flags.as_int;
236     return state_and_flags.as_struct.state != kRunnable &&
237         (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
238   }
239 
240   // If delta > 0 and (this != self or suspend_barrier is not null), this function may temporarily
241   // release thread_suspend_count_lock_ internally.
242   ALWAYS_INLINE
243   bool ModifySuspendCount(Thread* self,
244                           int delta,
245                           AtomicInteger* suspend_barrier,
246                           bool for_debugger)
247       WARN_UNUSED
248       REQUIRES(Locks::thread_suspend_count_lock_);
249 
250   bool RequestCheckpoint(Closure* function)
251       REQUIRES(Locks::thread_suspend_count_lock_);
252   void RequestSynchronousCheckpoint(Closure* function)
253       REQUIRES(!Locks::thread_suspend_count_lock_, !Locks::thread_list_lock_);
254   bool RequestEmptyCheckpoint()
255       REQUIRES(Locks::thread_suspend_count_lock_);
256 
257   void SetFlipFunction(Closure* function);
258   Closure* GetFlipFunction();
259 
260   gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
261     CHECK(kUseReadBarrier);
262     return tlsPtr_.thread_local_mark_stack;
263   }
264   void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
265     CHECK(kUseReadBarrier);
266     tlsPtr_.thread_local_mark_stack = stack;
267   }
268 
269   // Called when the thread detects that thread_suspend_count_ is non-zero. Gives up its share
270   // of mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
271   void FullSuspendCheck()
272       REQUIRES(!Locks::thread_suspend_count_lock_)
273       REQUIRES_SHARED(Locks::mutator_lock_);
274 
275   // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
276   ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable()
277       REQUIRES(!Locks::thread_suspend_count_lock_)
278       SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
279 
280   // Transition from runnable into a state where mutator privileges are denied. Releases share of
281   // mutator lock.
282   ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
283       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
284       UNLOCK_FUNCTION(Locks::mutator_lock_);
285 
286   // Once called, thread suspension will cause an assertion failure.
287   const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
288     Roles::uninterruptible_.Acquire();  // No-op.
289     if (kIsDebugBuild) {
290       CHECK(cause != nullptr);
291       const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
292       tls32_.no_thread_suspension++;
293       tlsPtr_.last_no_thread_suspension_cause = cause;
294       return previous_cause;
295     } else {
296       return nullptr;
297     }
298   }
299 
300   // End region where no thread suspension is expected.
301   void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) {
302     if (kIsDebugBuild) {
303       CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
304       CHECK_GT(tls32_.no_thread_suspension, 0U);
305       tls32_.no_thread_suspension--;
306       tlsPtr_.last_no_thread_suspension_cause = old_cause;
307     }
308     Roles::uninterruptible_.Release();  // No-op.
309   }
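  // Illustrative usage sketch (assumed caller code, not part of this header): the two calls
  // above are expected to be paired, threading the returned cause back into the End call.
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Example: visiting roots");
  //   ...  // region in which any suspension attempt should trip the assertion
  //   self->EndAssertNoThreadSuspension(old_cause);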
310 
311   void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
312 
313   // Return true if thread suspension is allowable.
314   bool IsThreadSuspensionAllowable() const;
315 
316   bool IsDaemon() const {
317     return tls32_.daemon;
318   }
319 
320   size_t NumberOfHeldMutexes() const;
321 
322   bool HoldsLock(ObjPtr<mirror::Object> object) const REQUIRES_SHARED(Locks::mutator_lock_);
323 
324   /*
325    * Changes the priority of this thread to match that of the java.lang.Thread object.
326    *
327    * We map a priority value from 1-10 to Linux "nice" values, where lower
328    * numbers indicate higher priority.
329    */
330   void SetNativePriority(int newPriority);
331 
332   /*
333    * Returns the thread priority for the current thread by querying the system.
334    * This is useful when attaching a thread through JNI.
335    *
336    * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
337    */
338   static int GetNativePriority();
339 
340   // Guaranteed to be non-zero.
341   uint32_t GetThreadId() const {
342     return tls32_.thin_lock_thread_id;
343   }
344 
345   pid_t GetTid() const {
346     return tls32_.tid;
347   }
348 
349   // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
350   mirror::String* GetThreadName() const REQUIRES_SHARED(Locks::mutator_lock_);
351 
352   // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
353   // allocation, or locking.
354   void GetThreadName(std::string& name) const;
355 
356   // Sets the thread's name.
357   void SetThreadName(const char* name) REQUIRES_SHARED(Locks::mutator_lock_);
358 
359   // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
360   uint64_t GetCpuMicroTime() const;
361 
362   mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
363     DCHECK(Thread::Current() == this) << "Use GetPeerFromOtherThread instead";
364     CHECK(tlsPtr_.jpeer == nullptr);
365     return tlsPtr_.opeer;
366   }
367   // GetPeer is not safe if called on another thread in the middle of the CC thread flip: the
368   // thread's stack may not have been flipped yet, so the peer may be a from-space (stale) ref.
369   // This function will explicitly mark/forward it.
370   mirror::Object* GetPeerFromOtherThread() const REQUIRES_SHARED(Locks::mutator_lock_);
371 
372   bool HasPeer() const {
373     return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
374   }
375 
376   RuntimeStats* GetStats() {
377     return &tls64_.stats;
378   }
379 
380   bool IsStillStarting() const;
381 
382   bool IsExceptionPending() const {
383     return tlsPtr_.exception != nullptr;
384   }
385 
386   mirror::Throwable* GetException() const REQUIRES_SHARED(Locks::mutator_lock_) {
387     return tlsPtr_.exception;
388   }
389 
390   void AssertPendingException() const;
391   void AssertPendingOOMException() const REQUIRES_SHARED(Locks::mutator_lock_);
392   void AssertNoPendingException() const;
393   void AssertNoPendingExceptionForNewException(const char* msg) const;
394 
395   void SetException(ObjPtr<mirror::Throwable> new_exception) REQUIRES_SHARED(Locks::mutator_lock_);
396 
397   void ClearException() REQUIRES_SHARED(Locks::mutator_lock_) {
398     tlsPtr_.exception = nullptr;
399   }
400 
401   // Find the catch block and perform a long jump to the appropriate exception handler.
402   NO_RETURN void QuickDeliverException() REQUIRES_SHARED(Locks::mutator_lock_);
403 
404   Context* GetLongJumpContext();
405   void ReleaseLongJumpContext(Context* context) {
406     if (tlsPtr_.long_jump_context != nullptr) {
407       // Each QuickExceptionHandler gets a long jump context and uses
408       // it for doing the long jump, after finding catch blocks/doing deoptimization.
409       // Both finding catch blocks and deoptimization can trigger another
410       // exception such as a result of class loading. So there can be nested
411       // cases of exception handling and multiple contexts being used.
412       // ReleaseLongJumpContext tries to save the context in tlsPtr_.long_jump_context
413       // for reuse, so there is no need to allocate a new one each time a context is
414       // needed. Since we only keep one context for reuse, delete the existing one;
415       // the passed-in context has yet to be used for the long jump.
416       delete tlsPtr_.long_jump_context;
417     }
418     tlsPtr_.long_jump_context = context;
419   }
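  // Illustrative pairing (assumed caller code): an exception handler borrows the cached
  // context and hands it back for reuse once the long jump has been set up.
  //
  //   Context* context = self->GetLongJumpContext();
  //   ...  // walk the stack / prepare the long jump using 'context'
  //   self->ReleaseLongJumpContext(context);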
420 
421   // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
422   // abort the runtime iff abort_on_error is true.
423   ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
424                               bool check_suspended = true,
425                               bool abort_on_error = true) const
426       REQUIRES_SHARED(Locks::mutator_lock_);
427 
428   // Returns whether the given exception was thrown by the current Java method being executed
429   // (Note that this includes native Java methods).
430   bool IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const
431       REQUIRES_SHARED(Locks::mutator_lock_);
432 
433   void SetTopOfStack(ArtMethod** top_method) {
434     tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
435   }
436 
437   void SetTopOfShadowStack(ShadowFrame* top) {
438     tlsPtr_.managed_stack.SetTopShadowFrame(top);
439   }
440 
441   bool HasManagedStack() const {
442     return (tlsPtr_.managed_stack.GetTopQuickFrame() != nullptr) ||
443         (tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
444   }
445 
446   // If 'msg' is null, no detail message is set.
447   void ThrowNewException(const char* exception_class_descriptor, const char* msg)
448       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
449 
450   // If 'msg' is null, no detail message is set. An exception must be pending, and will be
451   // used as the new exception's cause.
452   void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
453       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
454 
455   void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
456       __attribute__((format(printf, 3, 4)))
457       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
458 
459   void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
460       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
461 
462   // OutOfMemoryError is special, because we need to pre-allocate an instance.
463   // Only the GC should call this.
464   void ThrowOutOfMemoryError(const char* msg) REQUIRES_SHARED(Locks::mutator_lock_)
465       REQUIRES(!Roles::uninterruptible_);
466 
467   static void Startup();
468   static void FinishStartup();
469   static void Shutdown();
470 
471   // JNI methods
472   JNIEnvExt* GetJniEnv() const {
473     return tlsPtr_.jni_env;
474   }
475 
476   // Convert a jobject into a Object*
477   ObjPtr<mirror::Object> DecodeJObject(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
478   // Checks if the weak global ref has been cleared by the GC without decoding it.
479   bool IsJWeakCleared(jweak obj) const REQUIRES_SHARED(Locks::mutator_lock_);
480 
481   mirror::Object* GetMonitorEnterObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
482     return tlsPtr_.monitor_enter_object;
483   }
484 
485   void SetMonitorEnterObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
486     tlsPtr_.monitor_enter_object = obj;
487   }
488 
489   // Implements java.lang.Thread.interrupted.
490   bool Interrupted() REQUIRES(!*wait_mutex_);
491   // Implements java.lang.Thread.isInterrupted.
492   bool IsInterrupted() REQUIRES(!*wait_mutex_);
493   bool IsInterruptedLocked() REQUIRES(wait_mutex_) {
494     return interrupted_;
495   }
496   void Interrupt(Thread* self) REQUIRES(!*wait_mutex_);
497   void SetInterruptedLocked(bool i) REQUIRES(wait_mutex_) {
498     interrupted_ = i;
499   }
500   void Notify() REQUIRES(!*wait_mutex_);
501 
502   ALWAYS_INLINE void PoisonObjectPointers() {
503     ++poison_object_cookie_;
504   }
505 
506   ALWAYS_INLINE static void PoisonObjectPointersIfDebug();
507 
508   ALWAYS_INLINE uintptr_t GetPoisonObjectCookie() const {
509     return poison_object_cookie_;
510   }
511 
512  private:
513   void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
514 
515  public:
516   Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
517     return wait_mutex_;
518   }
519 
520   ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) {
521     return wait_cond_;
522   }
523 
524   Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) {
525     return wait_monitor_;
526   }
527 
528   void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) {
529     wait_monitor_ = mon;
530   }
531 
532   // Waiter link-list support.
533   Thread* GetWaitNext() const {
534     return tlsPtr_.wait_next;
535   }
536 
537   void SetWaitNext(Thread* next) {
538     tlsPtr_.wait_next = next;
539   }
540 
541   jobject GetClassLoaderOverride() {
542     return tlsPtr_.class_loader_override;
543   }
544 
545   void SetClassLoaderOverride(jobject class_loader_override);
546 
547   // Create the internal representation of a stack trace, which is more time-
548   // and space-efficient to compute than the StackTraceElement[].
549   template<bool kTransactionActive>
550   jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
551       REQUIRES_SHARED(Locks::mutator_lock_);
552 
553   // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
554   // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
555   // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
556   // with the number of valid frames in the returned array.
557   static jobjectArray InternalStackTraceToStackTraceElementArray(
558       const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
559       jobjectArray output_array = nullptr, int* stack_depth = nullptr)
560       REQUIRES_SHARED(Locks::mutator_lock_);
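  // Illustrative pairing (assumed caller code): the internal trace created above is later
  // expanded into a StackTraceElement[] on demand.
  //
  //   jobject internal = soa.Self()->CreateInternalStackTrace<false>(soa);
  //   jobjectArray elements =
  //       Thread::InternalStackTraceToStackTraceElementArray(soa, internal);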
561 
562   bool HasDebuggerShadowFrames() const {
563     return tlsPtr_.frame_id_to_shadow_frame != nullptr;
564   }
565 
566   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
567       REQUIRES_SHARED(Locks::mutator_lock_);
568 
569   ALWAYS_INLINE void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_);
570 
571   //
572   // Offsets of various members of native Thread class, used by compiled code.
573   //
574 
575   template<PointerSize pointer_size>
576   static ThreadOffset<pointer_size> ThinLockIdOffset() {
577     return ThreadOffset<pointer_size>(
578         OFFSETOF_MEMBER(Thread, tls32_) +
579         OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
580   }
581 
582   template<PointerSize pointer_size>
583   static ThreadOffset<pointer_size> ThreadFlagsOffset() {
584     return ThreadOffset<pointer_size>(
585         OFFSETOF_MEMBER(Thread, tls32_) +
586         OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
587   }
588 
589   template<PointerSize pointer_size>
590   static ThreadOffset<pointer_size> IsGcMarkingOffset() {
591     return ThreadOffset<pointer_size>(
592         OFFSETOF_MEMBER(Thread, tls32_) +
593         OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
594   }
595 
596   static constexpr size_t IsGcMarkingSize() {
597     return sizeof(tls32_.is_gc_marking);
598   }
599 
600   // Deoptimize the Java stack.
601   void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
602 
603  private:
604   template<PointerSize pointer_size>
605   static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
606     size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
607     size_t scale;
608     size_t shrink;
609     if (pointer_size == kRuntimePointerSize) {
610       scale = 1;
611       shrink = 1;
612     } else if (pointer_size > kRuntimePointerSize) {
613       scale = static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize);
614       shrink = 1;
615     } else {
616       DCHECK_GT(kRuntimePointerSize, pointer_size);
617       scale = 1;
618       shrink = static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size);
619     }
620     return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
621   }
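  // Worked example of the scaling above (illustrative): on a 32-bit runtime
  // (kRuntimePointerSize == PointerSize::k32) asked for PointerSize::k64 offsets,
  // scale == 2 and shrink == 1, so tls_ptr_offset 8 (the third 32-bit pointer slot)
  // maps to base + 16, the third slot in a 64-bit pointer layout.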
622 
623  public:
624   static uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
625                                                 PointerSize pointer_size) {
626     if (pointer_size == PointerSize::k32) {
627       return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset).
628           Uint32Value();
629     } else {
630       return QuickEntryPointOffset<PointerSize::k64>(quick_entrypoint_offset).
631           Uint32Value();
632     }
633   }
634 
635   template<PointerSize pointer_size>
636   static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
637     return ThreadOffsetFromTlsPtr<pointer_size>(
638         OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
639   }
640 
641   template<PointerSize pointer_size>
642   static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
643     return ThreadOffsetFromTlsPtr<pointer_size>(
644         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
645   }
646 
647   template<PointerSize pointer_size>
648   static ThreadOffset<pointer_size> SelfOffset() {
649     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
650   }
651 
652   template<PointerSize pointer_size>
653   static ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
654     return ThreadOffsetFromTlsPtr<pointer_size>(
655         OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase));
656   }
657 
658   template<PointerSize pointer_size>
659   static ThreadOffset<pointer_size> MterpDefaultIBaseOffset() {
660     return ThreadOffsetFromTlsPtr<pointer_size>(
661         OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_default_ibase));
662   }
663 
664   template<PointerSize pointer_size>
665   static ThreadOffset<pointer_size> MterpAltIBaseOffset() {
666     return ThreadOffsetFromTlsPtr<pointer_size>(
667         OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_alt_ibase));
668   }
669 
670   template<PointerSize pointer_size>
671   static ThreadOffset<pointer_size> ExceptionOffset() {
672     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
673   }
674 
675   template<PointerSize pointer_size>
676   static ThreadOffset<pointer_size> PeerOffset() {
677     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
678   }
679 
680 
681   template<PointerSize pointer_size>
682   static ThreadOffset<pointer_size> CardTableOffset() {
683     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
684   }
685 
686   template<PointerSize pointer_size>
687   static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
688     return ThreadOffsetFromTlsPtr<pointer_size>(
689         OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
690   }
691 
692   template<PointerSize pointer_size>
693   static ThreadOffset<pointer_size> ThreadLocalPosOffset() {
694     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
695                                                                 thread_local_pos));
696   }
697 
698   template<PointerSize pointer_size>
699   static ThreadOffset<pointer_size> ThreadLocalEndOffset() {
700     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
701                                                                 thread_local_end));
702   }
703 
704   template<PointerSize pointer_size>
705   static ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
706     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
707                                                                 thread_local_objects));
708   }
709 
710   template<PointerSize pointer_size>
711   static ThreadOffset<pointer_size> RosAllocRunsOffset() {
712     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
713                                                                 rosalloc_runs));
714   }
715 
716   template<PointerSize pointer_size>
717   static ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
718     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
719                                                                 thread_local_alloc_stack_top));
720   }
721 
722   template<PointerSize pointer_size>
723   static ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
724     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
725                                                                 thread_local_alloc_stack_end));
726   }
727 
728   // Size of stack less any space reserved for stack overflow
729   size_t GetStackSize() const {
730     return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
731   }
732 
733   uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const {
734     if (implicit_overflow_check) {
735       // The interpreter needs the extra overflow bytes that stack_end does
736       // not include.
737       return tlsPtr_.stack_end + GetStackOverflowReservedBytes(kRuntimeISA);
738     } else {
739       return tlsPtr_.stack_end;
740     }
741   }
742 
743   uint8_t* GetStackEnd() const {
744     return tlsPtr_.stack_end;
745   }
746 
747   // Set the stack end to the value to be used while handling a stack overflow.
748   void SetStackEndForStackOverflow() REQUIRES_SHARED(Locks::mutator_lock_);
749 
750   // Set the stack end to the value to be used during regular execution.
751   void ResetDefaultStackEnd() {
752     // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
753     // to throw a StackOverflowError.
754     tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
755   }
756 
757   bool IsHandlingStackOverflow() const {
758     return tlsPtr_.stack_end == tlsPtr_.stack_begin;
759   }
760 
761   template<PointerSize pointer_size>
762   static ThreadOffset<pointer_size> StackEndOffset() {
763     return ThreadOffsetFromTlsPtr<pointer_size>(
764         OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
765   }
766 
767   template<PointerSize pointer_size>
768   static ThreadOffset<pointer_size> JniEnvOffset() {
769     return ThreadOffsetFromTlsPtr<pointer_size>(
770         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
771   }
772 
773   template<PointerSize pointer_size>
774   static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
775     return ThreadOffsetFromTlsPtr<pointer_size>(
776         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
777         ManagedStack::TopQuickFrameOffset());
778   }
779 
780   const ManagedStack* GetManagedStack() const {
781     return &tlsPtr_.managed_stack;
782   }
783 
784   // Linked list recording fragments of managed stack.
785   void PushManagedStackFragment(ManagedStack* fragment) {
786     tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
787   }
788   void PopManagedStackFragment(const ManagedStack& fragment) {
789     tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
790   }
791 
792   ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
793     return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
794   }
795 
796   ShadowFrame* PopShadowFrame() {
797     return tlsPtr_.managed_stack.PopShadowFrame();
798   }
799 
800   template<PointerSize pointer_size>
801   static ThreadOffset<pointer_size> TopShadowFrameOffset() {
802     return ThreadOffsetFromTlsPtr<pointer_size>(
803         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
804         ManagedStack::TopShadowFrameOffset());
805   }
806 
807   // Is the given obj in this thread's stack indirect reference table?
808   bool HandleScopeContains(jobject obj) const;
809 
810   void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
811       REQUIRES_SHARED(Locks::mutator_lock_);
812 
813   BaseHandleScope* GetTopHandleScope() {
814     return tlsPtr_.top_handle_scope;
815   }
816 
817   void PushHandleScope(BaseHandleScope* handle_scope) {
818     DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
819     tlsPtr_.top_handle_scope = handle_scope;
820   }
821 
822   BaseHandleScope* PopHandleScope() {
823     BaseHandleScope* handle_scope = tlsPtr_.top_handle_scope;
824     DCHECK(handle_scope != nullptr);
825     tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
826     return handle_scope;
827   }
828 
829   template<PointerSize pointer_size>
830   static ThreadOffset<pointer_size> TopHandleScopeOffset() {
831     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
832                                                                 top_handle_scope));
833   }
834 
835   DebugInvokeReq* GetInvokeReq() const {
836     return tlsPtr_.debug_invoke_req;
837   }
838 
839   SingleStepControl* GetSingleStepControl() const {
840     return tlsPtr_.single_step_control;
841   }
842 
843   // Indicates whether this thread is ready to invoke a method for debugging. This
844   // is only true if the thread has been suspended by a debug event.
845   bool IsReadyForDebugInvoke() const {
846     return tls32_.ready_for_debug_invoke;
847   }
848 
849   void SetReadyForDebugInvoke(bool ready) {
850     tls32_.ready_for_debug_invoke = ready;
851   }
852 
853   bool IsDebugMethodEntry() const {
854     return tls32_.debug_method_entry_;
855   }
856 
857   void SetDebugMethodEntry() {
858     tls32_.debug_method_entry_ = true;
859   }
860 
861   void ClearDebugMethodEntry() {
862     tls32_.debug_method_entry_ = false;
863   }
864 
865   bool GetIsGcMarking() const {
866     CHECK(kUseReadBarrier);
867     return tls32_.is_gc_marking;
868   }
869 
870   void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking);
871 
872   bool GetWeakRefAccessEnabled() const {
873     CHECK(kUseReadBarrier);
874     return tls32_.weak_ref_access_enabled;
875   }
876 
877   void SetWeakRefAccessEnabled(bool enabled) {
878     CHECK(kUseReadBarrier);
879     tls32_.weak_ref_access_enabled = enabled;
880   }
881 
882   uint32_t GetDisableThreadFlipCount() const {
883     CHECK(kUseReadBarrier);
884     return tls32_.disable_thread_flip_count;
885   }
886 
887   void IncrementDisableThreadFlipCount() {
888     CHECK(kUseReadBarrier);
889     ++tls32_.disable_thread_flip_count;
890   }
891 
892   void DecrementDisableThreadFlipCount() {
893     CHECK(kUseReadBarrier);
894     DCHECK_GT(tls32_.disable_thread_flip_count, 0U);
895     --tls32_.disable_thread_flip_count;
896   }
897 
898   // Returns true if the thread is allowed to call into java.
899   bool CanCallIntoJava() const {
900     return can_call_into_java_;
901   }
902 
903   void SetCanCallIntoJava(bool can_call_into_java) {
904     can_call_into_java_ = can_call_into_java;
905   }
906 
907   // Activates single step control for debugging. The thread takes the
908   // ownership of the given SingleStepControl*. It is deleted by a call
909   // to DeactivateSingleStepControl or upon thread destruction.
910   void ActivateSingleStepControl(SingleStepControl* ssc);
911 
912   // Deactivates single step control for debugging.
913   void DeactivateSingleStepControl();
914 
915   // Sets debug invoke request for debugging. When the thread is resumed,
916   // it executes the method described by this request then sends the reply
917   // before suspending itself. The thread takes the ownership of the given
918   // DebugInvokeReq*. It is deleted by a call to ClearDebugInvokeReq.
919   void SetDebugInvokeReq(DebugInvokeReq* req);
920 
921   // Clears debug invoke request for debugging. When the thread completes
922   // method invocation, it deletes its debug invoke request and suspends
923   // itself.
924   void ClearDebugInvokeReq();
925 
926   // Returns the fake exception used to activate deoptimization.
927   static mirror::Throwable* GetDeoptimizationException() {
928     // Note that the mirror::Throwable must be aligned to kObjectAlignment or else it cannot be
929     // represented by ObjPtr.
930     return reinterpret_cast<mirror::Throwable*>(0x100);
931   }
932 
933   // Currently deoptimization invokes the verifier, which can trigger class loading
934   // and execute Java code, so there might be nested deoptimizations happening.
935   // We need to save the ongoing deoptimization shadow frames and return
936   // values on stacks.
937   // 'from_code' denotes whether the deoptimization was explicitly made from
938   // compiled code.
939   void PushDeoptimizationContext(const JValue& return_value,
940                                  bool is_reference,
941                                  bool from_code,
942                                  ObjPtr<mirror::Throwable> exception)
943       REQUIRES_SHARED(Locks::mutator_lock_);
944   void PopDeoptimizationContext(JValue* result,
945                                 ObjPtr<mirror::Throwable>* exception,
946                                 bool* from_code)
947       REQUIRES_SHARED(Locks::mutator_lock_);
948   void AssertHasDeoptimizationContext()
949       REQUIRES_SHARED(Locks::mutator_lock_);
950   void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
951   ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present = true);
952 
953   // For debugger, find the shadow frame that corresponds to a frame id.
954   // Or return null if there is none.
955   ShadowFrame* FindDebuggerShadowFrame(size_t frame_id)
956       REQUIRES_SHARED(Locks::mutator_lock_);
957   // For debugger, find the bool array that keeps track of the updated vreg set
958   // for a frame id.
959   bool* GetUpdatedVRegFlags(size_t frame_id) REQUIRES_SHARED(Locks::mutator_lock_);
960   // For debugger, find the shadow frame that corresponds to a frame id. If
961   // one doesn't exist yet, create one and track it in frame_id_to_shadow_frame.
962   ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id,
963                                                uint32_t num_vregs,
964                                                ArtMethod* method,
965                                                uint32_t dex_pc)
966       REQUIRES_SHARED(Locks::mutator_lock_);
967 
968   // Delete the entry that maps from frame_id to shadow_frame.
969   void RemoveDebuggerShadowFrameMapping(size_t frame_id)
970       REQUIRES_SHARED(Locks::mutator_lock_);
971 
972   std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
973     return tlsPtr_.instrumentation_stack;
974   }
975 
976   std::vector<ArtMethod*>* GetStackTraceSample() const {
977     DCHECK(!IsAotCompiler());
978     return tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
979   }
980 
981   void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
982     DCHECK(!IsAotCompiler());
983     tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample = sample;
984   }
985 
986   verifier::VerifierDeps* GetVerifierDeps() const {
987     DCHECK(IsAotCompiler());
988     return tlsPtr_.deps_or_stack_trace_sample.verifier_deps;
989   }
990 
991   // It is the responsibility of the caller to make sure the verifier_deps
992   // entry in the thread is cleared before destruction of the actual VerifierDeps
993   // object, or the thread.
994   void SetVerifierDeps(verifier::VerifierDeps* verifier_deps) {
995     DCHECK(IsAotCompiler());
996     DCHECK(verifier_deps == nullptr || tlsPtr_.deps_or_stack_trace_sample.verifier_deps == nullptr);
997     tlsPtr_.deps_or_stack_trace_sample.verifier_deps = verifier_deps;
998   }
999 
1000   uint64_t GetTraceClockBase() const {
1001     return tls64_.trace_clock_base;
1002   }
1003 
1004   void SetTraceClockBase(uint64_t clock_base) {
1005     tls64_.trace_clock_base = clock_base;
1006   }
1007 
1008   BaseMutex* GetHeldMutex(LockLevel level) const {
1009     return tlsPtr_.held_mutexes[level];
1010   }
1011 
1012   void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
1013     tlsPtr_.held_mutexes[level] = mutex;
1014   }
1015 
1016   void ClearSuspendBarrier(AtomicInteger* target)
1017       REQUIRES(Locks::thread_suspend_count_lock_);
1018 
1019   bool ReadFlag(ThreadFlag flag) const {
1020     return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
1021   }
1022 
1023   bool TestAllFlags() const {
1024     return (tls32_.state_and_flags.as_struct.flags != 0);
1025   }
1026 
1027   void AtomicSetFlag(ThreadFlag flag) {
1028     tls32_.state_and_flags.as_atomic_int.FetchAndOrSequentiallyConsistent(flag);
1029   }
1030 
1031   void AtomicClearFlag(ThreadFlag flag) {
1032     tls32_.state_and_flags.as_atomic_int.FetchAndAndSequentiallyConsistent(-1 ^ flag);
1033   }
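  // Illustrative sketch (assumed caller code): flags are plain bits in state_and_flags, so a
  // pending checkpoint request can be observed and acknowledged roughly like this.
  //
  //   if (self->ReadFlag(kCheckpointRequest)) {
  //     ...  // run the requested checkpoint work
  //     self->AtomicClearFlag(kCheckpointRequest);
  //   }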
1034 
1035   void ResetQuickAllocEntryPointsForThread(bool is_marking);
1036 
1037   // Returns the remaining space in the TLAB.
1038   size_t TlabSize() const {
1039     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_pos;
1040   }
1041 
1042   // Returns the remaining space in the TLAB if we were to expand it to maximum capacity.
1043   size_t TlabRemainingCapacity() const {
1044     return tlsPtr_.thread_local_limit - tlsPtr_.thread_local_pos;
1045   }
1046 
1047   // Expand the TLAB by a fixed number of bytes. There must be enough capacity to do so.
1048   void ExpandTlab(size_t bytes) {
1049     tlsPtr_.thread_local_end += bytes;
1050     DCHECK_LE(tlsPtr_.thread_local_end, tlsPtr_.thread_local_limit);
1051   }
1052 
1053   // Doesn't check that there is room.
1054   mirror::Object* AllocTlab(size_t bytes);
1055   void SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit);
1056   bool HasTlab() const;
1057   uint8_t* GetTlabStart() {
1058     return tlsPtr_.thread_local_start;
1059   }
1060   uint8_t* GetTlabPos() {
1061     return tlsPtr_.thread_local_pos;
1062   }
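  // Illustrative allocation sketch (assumed caller code): TLAB allocation checks the remaining
  // space first, since AllocTlab itself does not.
  //
  //   mirror::Object* obj = nullptr;
  //   if (self->TlabSize() >= alloc_size) {
  //     obj = self->AllocTlab(alloc_size);  // bump-pointer allocation within the TLAB
  //   }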
1063 
1064   // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
1065   // equal to a valid pointer.
1066   // TODO: does this need to be atomic?  I don't think so.
1067   void RemoveSuspendTrigger() {
1068     tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
1069   }
1070 
1071   // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
1072   // The next time a suspend check is done, it will load from the value at this address
1073   // and trigger a SIGSEGV.
1074   void TriggerSuspend() {
1075     tlsPtr_.suspend_trigger = nullptr;
1076   }
1077 
1078 
1079   // Push an object onto the allocation stack.
1080   bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
1081       REQUIRES_SHARED(Locks::mutator_lock_);
1082 
1083   // Set the thread local allocation pointers to the given pointers.
1084   void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
1085                                      StackReference<mirror::Object>* end);
1086 
1087   // Resets the thread local allocation pointers.
1088   void RevokeThreadLocalAllocationStack();
1089 
1090   size_t GetThreadLocalBytesAllocated() const {
1091     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
1092   }
1093 
1094   size_t GetThreadLocalObjectsAllocated() const {
1095     return tlsPtr_.thread_local_objects;
1096   }
1097 
1098   void* GetRosAllocRun(size_t index) const {
1099     return tlsPtr_.rosalloc_runs[index];
1100   }
1101 
1102   void SetRosAllocRun(size_t index, void* run) {
1103     tlsPtr_.rosalloc_runs[index] = run;
1104   }
1105 
1106   bool ProtectStack(bool fatal_on_error = true);
1107   bool UnprotectStack();
1108 
1109   void SetMterpDefaultIBase(void* ibase) {
1110     tlsPtr_.mterp_default_ibase = ibase;
1111   }
1112 
1113   void SetMterpCurrentIBase(void* ibase) {
1114     tlsPtr_.mterp_current_ibase = ibase;
1115   }
1116 
1117   void SetMterpAltIBase(void* ibase) {
1118     tlsPtr_.mterp_alt_ibase = ibase;
1119   }
1120 
1121   const void* GetMterpDefaultIBase() const {
1122     return tlsPtr_.mterp_default_ibase;
1123   }
1124 
1125   const void* GetMterpCurrentIBase() const {
1126     return tlsPtr_.mterp_current_ibase;
1127   }
1128 
1129   const void* GetMterpAltIBase() const {
1130     return tlsPtr_.mterp_alt_ibase;
1131   }
1132 
1133   bool HandlingSignal() const {
1134     return tls32_.handling_signal_;
1135   }
1136 
1137   void SetHandlingSignal(bool handling_signal) {
1138     tls32_.handling_signal_ = handling_signal;
1139   }
1140 
1141   bool IsTransitioningToRunnable() const {
1142     return tls32_.is_transitioning_to_runnable;
1143   }
1144 
1145   void SetIsTransitioningToRunnable(bool value) {
1146     tls32_.is_transitioning_to_runnable = value;
1147   }
1148 
1149   void PushVerifier(verifier::MethodVerifier* verifier);
1150   void PopVerifier(verifier::MethodVerifier* verifier);
1151 
1152   void InitStringEntryPoints();
1153 
1154   void ModifyDebugDisallowReadBarrier(int8_t delta) {
1155     debug_disallow_read_barrier_ += delta;
1156   }
1157 
1158   uint8_t GetDebugDisallowReadBarrierCount() const {
1159     return debug_disallow_read_barrier_;
1160   }
1161 
1162   const void* GetCustomTLS() const {
1163     return custom_tls_;
1164   }
1165 
1166   void SetCustomTLS(const void* data) {
1167     custom_tls_ = data;
1168   }
1169 
1170   // Returns true if the current thread is the jit sensitive thread.
1171   bool IsJitSensitiveThread() const {
1172     return this == jit_sensitive_thread_;
1173   }
1174 
1175   // Returns true if StrictMode events are traced for the current thread.
1176   static bool IsSensitiveThread() {
1177     if (is_sensitive_thread_hook_ != nullptr) {
1178       return (*is_sensitive_thread_hook_)();
1179     }
1180     return false;
1181   }
1182 
1183   static jobject CreateCompileTimePeer(JNIEnv* env,
1184                                        const char* name,
1185                                        bool as_daemon,
1186                                        jobject thread_group)
1187       REQUIRES_SHARED(Locks::mutator_lock_);
1188 
1189  private:
1190   explicit Thread(bool daemon);
1191   ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
1192   void Destroy();
1193 
1194   // Attaches the calling native thread to the runtime, returning the new native peer.
1195   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
1196   template <typename PeerAction>
1197   static Thread* Attach(const char* thread_name,
1198                         bool as_daemon,
1199                         PeerAction p);
1200 
1201   void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
1202 
1203   template<bool kTransactionActive>
1204   static void InitPeer(ScopedObjectAccessAlreadyRunnable& soa,
1205                        ObjPtr<mirror::Object> peer,
1206                        jboolean thread_is_daemon,
1207                        jobject thread_group,
1208                        jobject thread_name,
1209                        jint thread_priority)
1210       REQUIRES_SHARED(Locks::mutator_lock_);
1211 
1212   // Avoid use; callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
1213   // Dbg::ManageDeoptimization.
1214   ThreadState SetStateUnsafe(ThreadState new_state) {
1215     ThreadState old_state = GetState();
1216     if (old_state == kRunnable && new_state != kRunnable) {
1217       // Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in
1218       // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
1219       // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
1220       TransitionToSuspendedAndRunCheckpoints(new_state);
1221       // Since we transitioned to a suspended state, check the pass barrier requests.
1222       PassActiveSuspendBarriers();
1223     } else {
1224       tls32_.state_and_flags.as_struct.state = new_state;
1225     }
1226     return old_state;
1227   }
1228 
1229   void VerifyStackImpl() REQUIRES_SHARED(Locks::mutator_lock_);
1230 
1231   void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
1232   void DumpStack(std::ostream& os,
1233                  bool dump_native_stack = true,
1234                  BacktraceMap* backtrace_map = nullptr,
1235                  bool force_dump_stack = false) const
1236       REQUIRES(!Locks::thread_suspend_count_lock_)
1237       REQUIRES_SHARED(Locks::mutator_lock_);
1238 
1239   // Out-of-line conveniences for debugging in gdb.
1240   static Thread* CurrentFromGdb();  // Like Thread::Current.
1241   // Like Thread::Dump(std::cerr).
1242   void DumpFromGdb() const REQUIRES_SHARED(Locks::mutator_lock_);
1243 
1244   static void* CreateCallback(void* arg);
1245 
1246   void HandleUncaughtExceptions(ScopedObjectAccess& soa)
1247       REQUIRES_SHARED(Locks::mutator_lock_);
1248   void RemoveFromThreadGroup(ScopedObjectAccess& soa) REQUIRES_SHARED(Locks::mutator_lock_);
1249 
1250   // Initialize a thread.
1251   //
1252   // The third parameter is not mandatory. If given, the thread will use this JNIEnvExt. In case
1253   // Init succeeds, this means the thread takes ownership of it. If Init fails, it is the caller's
1254   // responsibility to destroy the given JNIEnvExt. If the parameter is null, Init will try to
1255   // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value
1256   // of false).
1257   bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
1258       REQUIRES(Locks::runtime_shutdown_lock_);
1259   void InitCardTable();
1260   void InitCpu();
1261   void CleanupCpu();
1262   void InitTlsEntryPoints();
1263   void InitTid();
1264   void InitPthreadKeySelf();
1265   bool InitStackHwm();
1266 
1267   void SetUpAlternateSignalStack();
1268   void TearDownAlternateSignalStack();
1269 
1270   ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state)
1271       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1272 
1273   ALWAYS_INLINE void PassActiveSuspendBarriers()
1274       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
1275 
1276   // Registers the current thread as the jit sensitive thread. Should be called just once.
1277   static void SetJitSensitiveThread() {
1278     if (jit_sensitive_thread_ == nullptr) {
1279       jit_sensitive_thread_ = Thread::Current();
1280     } else {
1281       LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:"
1282           << Thread::Current()->GetTid();
1283     }
1284   }
1285 
1286   static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
1287     is_sensitive_thread_hook_ = is_sensitive_thread_hook;
1288   }
1289 
1290   bool ModifySuspendCountInternal(Thread* self,
1291                                   int delta,
1292                                   AtomicInteger* suspend_barrier,
1293                                   bool for_debugger)
1294       WARN_UNUSED
1295       REQUIRES(Locks::thread_suspend_count_lock_);
1296 
1297   void RunCheckpointFunction();
1298   void RunEmptyCheckpoint();
1299 
1300   bool PassActiveSuspendBarriers(Thread* self)
1301       REQUIRES(!Locks::thread_suspend_count_lock_);
1302 
1303   // Install the protected region for implicit stack checks.
1304   void InstallImplicitProtection();
1305 
1306   template <bool kPrecise>
1307   void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
1308 
1309   static bool IsAotCompiler();
1310 
1311   // 32 bits of atomically changed state and flags. Keeping them as 32 bits allows an atomic CAS to
1312   // change from being Suspended to Runnable without a suspend request occurring.
1313   union PACKED(4) StateAndFlags {
1314     StateAndFlags() {}
1315     struct PACKED(4) {
1316       // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
1317       // ThreadFlags for bit field meanings.
1318       volatile uint16_t flags;
1319       // Holds the ThreadState. May be changed non-atomically between Suspended (i.e. not Runnable)
1320       // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
1321       // operation. If a thread is suspended and a suspend_request is present, the thread may not
1322       // change to Runnable, as a GC or other operation is in progress.
1323       volatile uint16_t state;
1324     } as_struct;
1325     AtomicInteger as_atomic_int;
1326     volatile int32_t as_int;
1327 
1328    private:
1329     // gcc does not handle struct with volatile member assignments correctly.
1330     // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
1331     DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
1332   };
1333   static_assert(sizeof(StateAndFlags) == sizeof(int32_t), "Weird state_and_flags size");
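  // Illustrative sketch of the atomic transition this packing enables (assumption: the real code
  // lives in Thread::TransitionFromSuspendedToRunnable, and the art::Atomic method name shown here
  // is only an example):
  //
  //   union StateAndFlags old_sf, new_sf;
  //   old_sf.as_int = tls32_.state_and_flags.as_int;
  //   if ((old_sf.as_struct.flags & kSuspendRequest) == 0) {
  //     new_sf.as_int = old_sf.as_int;
  //     new_sf.as_struct.state = kRunnable;
  //     // One CAS covers both halves, so a racing suspend request cannot be lost.
  //     tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakAcquire(old_sf.as_int,
  //                                                                     new_sf.as_int);
  //   }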
1334 
1335   static void ThreadExitCallback(void* arg);
1336 
1337   // Maximum number of suspend barriers.
1338   static constexpr uint32_t kMaxSuspendBarriers = 3;
1339 
1340   // Has Thread::Startup been called?
1341   static bool is_started_;
1342 
1343   // TLS key used to retrieve the Thread*.
1344   static pthread_key_t pthread_key_self_;
1345 
1346   // Used to notify threads that they should attempt to resume; they will suspend again if
1347   // their suspend count is > 0.
1348   static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);
1349 
1350   // Hook passed by the framework that returns true
1351   // when StrictMode events are traced for the current thread.
1352   static bool (*is_sensitive_thread_hook_)();
1353   // Stores the jit sensitive thread (which for now is the UI thread).
1354   static Thread* jit_sensitive_thread_;
1355 
1356   /***********************************************************************************************/
1357   // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
1358   // pointer size differences. To encourage shorter encoding, more frequently used values appear
1359   // first if possible.
1360   /***********************************************************************************************/
1361 
1362   struct PACKED(4) tls_32bit_sized_values {
1363     // We have no control over the size of 'bool', but want our boolean fields
1364     // to be 4-byte quantities.
1365     typedef uint32_t bool32_t;
1366 
1367     explicit tls_32bit_sized_values(bool is_daemon) :
1368       suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
1369       daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
1370       thread_exit_check_count(0), handling_signal_(false),
1371       is_transitioning_to_runnable(false), ready_for_debug_invoke(false),
1372       debug_method_entry_(false), is_gc_marking(false), weak_ref_access_enabled(true),
1373       disable_thread_flip_count(0) {
1374     }
1375 
1376     union StateAndFlags state_and_flags;
1377     static_assert(sizeof(union StateAndFlags) == sizeof(int32_t),
1378                   "Size of state_and_flags and int32 are different");
1379 
1380     // A non-zero value is used to tell the current thread to enter a safe point
1381     // at the next poll.
1382     int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1383 
1384     // How much of 'suspend_count_' is by request of the debugger, used to set things right
1385     // when the debugger detaches. Must be <= suspend_count_.
1386     int debug_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
1387 
1388     // Thin lock thread id. This is a small integer used by the thin lock implementation.
1389     // This is not to be confused with the native thread's tid, nor is it the value returned
1390     // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
1391     // important difference between this id and the ids visible to managed code is that these
1392     // ones get reused (to ensure that they fit in the number of bits available).
1393     uint32_t thin_lock_thread_id;
1394 
1395     // System thread id.
1396     uint32_t tid;
1397 
1398     // Is the thread a daemon?
1399     const bool32_t daemon;
1400 
1401     // A boolean telling us whether we're recursively throwing OOME.
1402     bool32_t throwing_OutOfMemoryError;
1403 
1404     // A positive value implies we're in a region where thread suspension isn't expected.
1405     uint32_t no_thread_suspension;
1406 
1407     // How many times has our pthread key's destructor been called?
1408     uint32_t thread_exit_check_count;
1409 
1410     // True if signal is being handled by this thread.
1411     bool32_t handling_signal_;
1412 
1413     // True if the thread is in TransitionFromSuspendedToRunnable(). This is used to distinguish the
1414     // non-runnable threads (e.g. kNative, kWaiting) that are about to transition to runnable from
1415     // the rest of them.
1416     bool32_t is_transitioning_to_runnable;
1417 
1418     // True if the thread has been suspended by a debugger event. This is
1419     // used to invoke a method from the debugger, which is only allowed when
1420     // the thread is suspended by an event.
1421     bool32_t ready_for_debug_invoke;
1422 
1423     // True if the thread enters a method. This is used to detect the method entry
1424     // event for the debugger.
1425     bool32_t debug_method_entry_;
1426 
1427     // True if the GC is in the marking phase. This is used for the CC collector only. This is
1428     // thread local so that we can simplify the logic to check for the fast path of read barriers of
1429     // GC roots.
1430     bool32_t is_gc_marking;
1431 
1432     // True if the thread is allowed to access a weak ref (Reference::GetReferent() and system
1433     // weaks) and to potentially mark an object alive/gray. This is used for concurrent reference
1434     // processing of the CC collector only. This is thread local so that we can enable/disable weak
1435     // ref access by using a checkpoint and avoid a race around the time weak ref access gets
1436     // disabled and concurrent reference processing begins (if weak ref access is disabled during a
1437     // pause, this is not an issue.) Other collectors use Runtime::DisallowNewSystemWeaks() and
1438     // ReferenceProcessor::EnableSlowPath().
1439     bool32_t weak_ref_access_enabled;
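    // Illustrative sketch of the toggle described above (assumption: the CC collector uses a
    // checkpoint that effectively calls a Thread::SetWeakRefAccessEnabled-style accessor on each
    // thread; the exact call site is not shown in this header):
    //
    //   thread->SetWeakRefAccessEnabled(false);  // Before concurrent reference processing starts.
    //   ...                                      // References are processed.
    //   thread->SetWeakRefAccessEnabled(true);   // Re-enabled once processing is done.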
1440 
1441     // A thread local version of Heap::disable_thread_flip_count_. This keeps track of how many
1442     // levels of (nested) JNI critical sections the thread is in and is used to detect a nested JNI
1443     // critical section enter.
1444     uint32_t disable_thread_flip_count;
1445   } tls32_;
1446 
1447   struct PACKED(8) tls_64bit_sized_values {
1448     tls_64bit_sized_values() : trace_clock_base(0) {
1449     }
1450 
1451     // The clock base used for tracing.
1452     uint64_t trace_clock_base;
1453 
1454     RuntimeStats stats;
1455   } tls64_;
1456 
1457   struct PACKED(sizeof(void*)) tls_ptr_sized_values {
1458       tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
1459       managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), tmp_jni_env(nullptr),
1460       self(nullptr), opeer(nullptr), jpeer(nullptr), stack_begin(nullptr), stack_size(0),
1461       deps_or_stack_trace_sample(), wait_next(nullptr), monitor_enter_object(nullptr),
1462       top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
1463       instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
1464       stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr),
1465       frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
1466       last_no_thread_suspension_cause(nullptr), checkpoint_function(nullptr),
1467       thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
1468       thread_local_limit(nullptr),
1469       thread_local_objects(0), mterp_current_ibase(nullptr), mterp_default_ibase(nullptr),
1470       mterp_alt_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
1471       thread_local_alloc_stack_end(nullptr),
1472       flip_function(nullptr), method_verifier(nullptr), thread_local_mark_stack(nullptr) {
1473       std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
1474     }
1475 
1476     // The biased card table, see CardTable for details.
1477     uint8_t* card_table;
1478 
1479     // The pending exception or null.
1480     mirror::Throwable* exception;
1481 
1482     // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
1483     // We leave extra space so there's room for the code that throws StackOverflowError.
1484     uint8_t* stack_end;
1485 
1486     // The top of the managed stack often manipulated directly by compiler generated code.
1487     ManagedStack managed_stack;
1488 
1489     // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check.  It is
1490     // normally set to the address of itself.
1491     uintptr_t* suspend_trigger;
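    // Illustrative sketch of the implicit suspend check (assumption: compiler-generated code
    // performs a load through this pointer at suspend points, so a remote suspend request only
    // needs to clear the pointer to make the next check fault into the signal handler):
    //
    //   *reinterpret_cast<volatile uintptr_t*>(suspend_trigger);  // Faults once set to 0.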
1492 
1493     // Every thread may have an associated JNI environment.
1494     JNIEnvExt* jni_env;
1495 
1496     // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the
1497     // created thread.
1498     JNIEnvExt* tmp_jni_env;
1499 
1500     // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
1501     // is easy but getting the address of Thread::Current is hard. This field can be read off of
1502     // Thread::Current to give the address.
1503     Thread* self;
1504 
1505     // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
1506     // start up, until the thread is registered and the local opeer_ is used.
1507     mirror::Object* opeer;
1508     jobject jpeer;
1509 
1510     // The "lowest addressable byte" of the stack.
1511     uint8_t* stack_begin;
1512 
1513     // Size of the stack.
1514     size_t stack_size;
1515 
1516     // Sampling profiler and AOT verification cannot happen on the same run, so we share
1517     // the same entry for the stack trace and the verifier deps.
1518     union DepsOrStackTraceSample {
1519       DepsOrStackTraceSample() {
1520         verifier_deps = nullptr;
1521         stack_trace_sample = nullptr;
1522       }
1523       // Pointer to previous stack trace captured by sampling profiler.
1524       std::vector<ArtMethod*>* stack_trace_sample;
1525       // When doing AOT verification, per-thread VerifierDeps.
1526       verifier::VerifierDeps* verifier_deps;
1527     } deps_or_stack_trace_sample;
1528 
1529     // The next thread in the wait set this thread is part of or null if not waiting.
1530     Thread* wait_next;
1531 
1532     // If we're blocked in MonitorEnter, this is the object we're trying to lock.
1533     mirror::Object* monitor_enter_object;
1534 
1535     // Top of linked list of handle scopes or null for none.
1536     BaseHandleScope* top_handle_scope;
1537 
1538     // Needed to get the right ClassLoader in JNI_OnLoad, but also
1539     // useful for testing.
1540     jobject class_loader_override;
1541 
1542     // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
1543     Context* long_jump_context;
1544 
1545     // Additional stack used by method instrumentation to store method and return pc values.
1546     // Stored as a pointer since std::deque is not PACKED.
1547     std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack;
1548 
1549     // JDWP invoke-during-breakpoint support.
1550     DebugInvokeReq* debug_invoke_req;
1551 
1552     // JDWP single-stepping support.
1553     SingleStepControl* single_step_control;
1554 
1555     // For gc purpose, a shadow frame record stack that keeps track of:
1556     // 1) shadow frames under construction.
1557     // 2) deoptimization shadow frames.
1558     StackedShadowFrameRecord* stacked_shadow_frame_record;
1559 
1560     // Deoptimization return value record stack.
1561     DeoptimizationContextRecord* deoptimization_context_stack;
1562 
1563     // For debugger, a linked list that keeps the mapping from frame_id to shadow frame.
1564     // Shadow frames may be created before deoptimization happens so that the debugger can
1565     // set local values there first.
1566     FrameIdToShadowFrame* frame_id_to_shadow_frame;
1567 
1568     // A cached copy of the java.lang.Thread's name.
1569     std::string* name;
1570 
1571     // A cached pthread_t for the pthread underlying this Thread*.
1572     pthread_t pthread_self;
1573 
1574     // If no_thread_suspension_ is > 0, what is causing that assertion.
1575     const char* last_no_thread_suspension_cause;
1576 
1577     // Pending checkpoint function or null if non-pending. If this checkpoint is set and someone
1578     // requests another checkpoint, it goes to the checkpoint overflow list.
1579     Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);
1580 
1581     // Pending barriers that require passing, or null if non-pending. Installation is guarded by
1582     // Locks::thread_suspend_count_lock_.
1583     // They work effectively as art::Barrier, but implemented directly using AtomicInteger and futex
1584     // to avoid additional cost of a mutex and a condition variable, as used in art::Barrier.
1585     AtomicInteger* active_suspend_barriers[kMaxSuspendBarriers];
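    // Illustrative sketch of how a barrier is passed (assumption: the real logic is in
    // Thread::PassActiveSuspendBarriers, and the art::Atomic method name plus the futex wake are
    // shown only schematically):
    //
    //   int32_t remaining = barrier->FetchAndSubSequentiallyConsistent(1) - 1;
    //   if (remaining == 0) {
    //     // Last thread through: futex-wake the requester waiting on the counter.
    //   }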
1586 
1587     // Thread-local allocation pointer. Moved here to force alignment for thread_local_pos on ARM.
1588     uint8_t* thread_local_start;
1589 
1590     // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for
1591     // potentially better performance.
1592     uint8_t* thread_local_pos;
1593     uint8_t* thread_local_end;
1594 
1595     // The thread-local limit is how far we can expand the thread-local buffer; it is greater than
1596     // or equal to thread_local_end.
1597     uint8_t* thread_local_limit;
1598 
1599     size_t thread_local_objects;
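    // Illustrative bump-pointer sketch for the fields above (assumption: the actual fast path is
    // in a Thread::AllocTlab-style helper and in compiler-generated code; failure handling and
    // alignment are omitted):
    //
    //   if (thread_local_pos + alloc_size <= thread_local_end) {
    //     mirror::Object* obj = reinterpret_cast<mirror::Object*>(thread_local_pos);
    //     thread_local_pos += alloc_size;
    //     ++thread_local_objects;
    //   }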
1600 
1601     // Entrypoint function pointers.
1602     // TODO: move this to more of a global offset table model to avoid per-thread duplication.
1603     JniEntryPoints jni_entrypoints;
1604     QuickEntryPoints quick_entrypoints;
1605 
1606     // Mterp jump table bases.
1607     void* mterp_current_ibase;
1608     void* mterp_default_ibase;
1609     void* mterp_alt_ibase;
1610 
1611     // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
1612     void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];
1613 
1614     // Thread-local allocation stack data/routines.
1615     StackReference<mirror::Object>* thread_local_alloc_stack_top;
1616     StackReference<mirror::Object>* thread_local_alloc_stack_end;
1617 
1618     // Support for Mutex lock hierarchy bug detection.
1619     BaseMutex* held_mutexes[kLockLevelCount];
1620 
1621     // The function used for thread flip.
1622     Closure* flip_function;
1623 
1624     // Current method verifier, used for root marking.
1625     verifier::MethodVerifier* method_verifier;
1626 
1627     // Thread-local mark stack for the concurrent copying collector.
1628     gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;
1629   } tlsPtr_;
1630 
1631   // Guards the 'interrupted_' and 'wait_monitor_' members.
1632   Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1633 
1634   // Condition variable waited upon during a wait.
1635   ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
1636   // Pointer to the monitor lock we're currently waiting on or null if not waiting.
1637   Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
1638 
1639   // Thread "interrupted" status; stays raised until queried or thrown.
1640   bool interrupted_ GUARDED_BY(wait_mutex_);
1641 
1642   // Debug disable-read-barrier count; it is only checked in debug builds and only in the runtime.
1643   uint8_t debug_disallow_read_barrier_ = 0;
1644 
1645   // Note that it is not in the packed struct and may not be accessed for cross compilation.
1646   uintptr_t poison_object_cookie_ = 0;
1647 
1648   // Pending extra checkpoints if checkpoint_function_ is already used.
1649   std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);
1650 
1651   // Custom TLS field that can be used by plugins.
1652   // TODO: Generalize once we have more plugins.
1653   const void* custom_tls_;
1654 
1655   // True if the thread is allowed to call back into Java (e.g. during class resolution).
1656   // By default this is true.
1657   bool can_call_into_java_;
1658 
1659   friend class Dbg;  // For SetStateUnsafe.
1660   friend class gc::collector::SemiSpace;  // For getting stack traces.
1661   friend class Runtime;  // For CreatePeer.
1662   friend class QuickExceptionHandler;  // For dumping the stack.
1663   friend class ScopedThreadStateChange;
1664   friend class StubTest;  // For accessing entrypoints.
1665   friend class ThreadList;  // For ~Thread and Destroy.
1666 
1667   friend class EntrypointsOrderTest;  // To test the order of tls entries.
1668 
1669   DISALLOW_COPY_AND_ASSIGN(Thread);
1670 };
1671 
1672 class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
1673  public:
1674   ALWAYS_INLINE explicit ScopedAssertNoThreadSuspension(const char* cause)
1675       ACQUIRE(Roles::uninterruptible_) {
1676     if (kIsDebugBuild) {
1677       self_ = Thread::Current();
1678       old_cause_ = self_->StartAssertNoThreadSuspension(cause);
1679     } else {
1680       Roles::uninterruptible_.Acquire();  // No-op.
1681     }
1682   }
1683   ALWAYS_INLINE ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) {
1684     if (kIsDebugBuild) {
1685       self_->EndAssertNoThreadSuspension(old_cause_);
1686     } else {
1687       Roles::uninterruptible_.Release();  // No-op.
1688     }
1689   }
1690 
1691  private:
1692   Thread* self_;
1693   const char* old_cause_;
1694 };
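// Illustrative usage (sketch; the cause string and scope contents are arbitrary):
//
//   {
//     ScopedAssertNoThreadSuspension sants("Visiting roots");
//     ...  // Code here must not suspend; debug builds assert if it tries.
//   }  // Assertion ends and the uninterruptible role is released here.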
1695 
1696 class ScopedStackedShadowFramePusher {
1697  public:
1698   ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf, StackedShadowFrameType type)
1699     : self_(self), type_(type) {
1700     self_->PushStackedShadowFrame(sf, type);
1701   }
1702   ~ScopedStackedShadowFramePusher() {
1703     self_->PopStackedShadowFrame(type_);
1704   }
1705 
1706  private:
1707   Thread* const self_;
1708   const StackedShadowFrameType type_;
1709 
1710   DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
1711 };
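// Illustrative usage (sketch; construction of 'shadow_frame' is omitted):
//
//   ScopedStackedShadowFramePusher pusher(
//       self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
//   ...  // The frame is popped automatically when 'pusher' goes out of scope.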
1712 
1713 // Only works for debug builds.
1714 class ScopedDebugDisallowReadBarriers {
1715  public:
1716   explicit ScopedDebugDisallowReadBarriers(Thread* self) : self_(self) {
1717     self_->ModifyDebugDisallowReadBarrier(1);
1718   }
1719   ~ScopedDebugDisallowReadBarriers() {
1720     self_->ModifyDebugDisallowReadBarrier(-1);
1721   }
1722 
1723  private:
1724   Thread* const self_;
1725 };
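// Illustrative usage (sketch; per the comment above, this only has an effect in debug builds):
//
//   {
//     ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
//     ...  // Read barriers are expected not to run in this scope.
//   }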
1726 
1727 class ScopedTransitioningToRunnable : public ValueObject {
1728  public:
1729   explicit ScopedTransitioningToRunnable(Thread* self)
1730       : self_(self) {
1731     DCHECK_EQ(self, Thread::Current());
1732     if (kUseReadBarrier) {
1733       self_->SetIsTransitioningToRunnable(true);
1734     }
1735   }
1736 
1737   ~ScopedTransitioningToRunnable() {
1738     if (kUseReadBarrier) {
1739       self_->SetIsTransitioningToRunnable(false);
1740     }
1741   }
1742 
1743  private:
1744   Thread* const self_;
1745 };
1746 
1747 class ThreadLifecycleCallback {
1748  public:
1749   virtual ~ThreadLifecycleCallback() {}
1750 
1751   virtual void ThreadStart(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
1752   virtual void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
1753 };
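// Illustrative implementation sketch (the subclass name is hypothetical; a real callback would be
// registered with the runtime, which is outside the scope of this header):
//
//   class LoggingThreadCallback : public ThreadLifecycleCallback {
//    public:
//     void ThreadStart(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_) {
//       LOG(INFO) << "thread started: " << *self;
//     }
//     void ThreadDeath(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_) {
//       LOG(INFO) << "thread dying: " << *self;
//     }
//   };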
1754 
1755 std::ostream& operator<<(std::ostream& os, const Thread& thread);
1756 std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& thread);
1757 
1758 }  // namespace art
1759 
1760 #endif  // ART_RUNTIME_THREAD_H_
1761