/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_INL_H_
#define ART_RUNTIME_THREAD_INL_H_

#include "arch/instruction_set.h"
#include "base/aborting.h"
#include "base/casts.h"
#include "base/mutex-inl.h"
#include "base/time_utils.h"
#include "indirect_reference_table.h"
#include "jni/jni_env_ext.h"
#include "managed_stack-inl.h"
#include "obj_ptr-inl.h"
#include "runtime.h"
#include "thread-current-inl.h"
#include "thread.h"
#include "thread_list.h"
#include "thread_pool.h"

namespace art HIDDEN {

// Quickly access the current thread from a JNIEnv.
inline Thread* Thread::ForEnv(JNIEnv* env) {
  JNIEnvExt* full_env(down_cast<JNIEnvExt*>(env));
  return full_env->GetSelf();
}

inline size_t Thread::GetStackOverflowProtectedSize() {
  // The kMemoryToolStackGuardSizeScale is expected to be 1 when ASan is not enabled.
  // As the function is always inlined, in those cases each function call should turn
  // into a simple reference to gPageSize.
  return kMemoryToolStackGuardSizeScale * gPageSize;
}

inline ObjPtr<mirror::Object> Thread::DecodeJObject(jobject obj) const {
  if (obj == nullptr) {
    return nullptr;
  }
  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
  if (LIKELY(IndirectReferenceTable::IsJniTransitionOrLocalReference(ref))) {
    // For JNI transitions, the `jclass` for a static method points to the
    // `CompressedReference<>` in the `ArtMethod::declaring_class_` and other `jobject`
    // arguments point to spilled stack references but a `StackReference<>` is just
    // a subclass of `CompressedReference<>`. Local references also point to
    // a `CompressedReference<>` encapsulated in a `GcRoot<>`.
    if (kIsDebugBuild && IndirectReferenceTable::GetIndirectRefKind(ref) == kJniTransition) {
      CHECK(IsJniTransitionReference(obj));
    }
    auto* cref = IndirectReferenceTable::ClearIndirectRefKind<
        mirror::CompressedReference<mirror::Object>*>(ref);
    ObjPtr<mirror::Object> result = cref->AsMirrorPtr();
    if (kIsDebugBuild && IndirectReferenceTable::GetIndirectRefKind(ref) != kJniTransition) {
      CHECK_EQ(result, tlsPtr_.jni_env->locals_.Get(ref));
    }
    return result;
  } else {
    return DecodeGlobalJObject(obj);
  }
}

inline void Thread::AllowThreadSuspension() {
  CheckSuspend();
  // Invalidate the current thread's object pointers (ObjPtr) to catch possible moving GC bugs due
  // to missing handles.
  PoisonObjectPointers();
}

inline void Thread::CheckSuspend(bool implicit) {
  DCHECK_EQ(Thread::Current(), this);
  while (true) {
    StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
    if (LIKELY(!state_and_flags.IsAnyOfFlagsSet(SuspendOrCheckpointRequestFlags()))) {
      break;
    } else if (state_and_flags.IsFlagSet(ThreadFlag::kCheckpointRequest)) {
      RunCheckpointFunction();
    } else if (state_and_flags.IsFlagSet(ThreadFlag::kSuspendRequest) &&
               !state_and_flags.IsFlagSet(ThreadFlag::kSuspensionImmune)) {
      FullSuspendCheck(implicit);
      implicit = false;  // We do not need to `MadviseAwayAlternateSignalStack()` anymore.
    } else if (state_and_flags.IsFlagSet(ThreadFlag::kEmptyCheckpointRequest)) {
      RunEmptyCheckpoint();
    } else {
      DCHECK(state_and_flags.IsFlagSet(ThreadFlag::kSuspensionImmune));
      break;
    }
  }
  if (implicit) {
    // For implicit suspend check we want to `madvise()` away
    // the alternate signal stack to avoid wasting memory.
    MadviseAwayAlternateSignalStack();
  }
}

inline void Thread::CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex) {
  Thread* self = Thread::Current();
  DCHECK_EQ(self, this);
  for (;;) {
    if (ReadFlag(ThreadFlag::kEmptyCheckpointRequest)) {
      RunEmptyCheckpoint();
      // Check that we hold only an expected mutex when accessing a weak ref.
      if (kIsDebugBuild) {
        for (int i = kLockLevelCount - 1; i >= 0; --i) {
          BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
          if (held_mutex != nullptr && held_mutex != GetMutatorLock() &&
              held_mutex != cond_var_mutex &&
              held_mutex != cp_placeholder_mutex_.load(std::memory_order_relaxed)) {
            // cp_placeholder_mutex_ may still be nullptr. That's OK.
            CHECK(Locks::IsExpectedOnWeakRefAccess(held_mutex))
                << "Holding unexpected mutex " << held_mutex->GetName()
                << " when accessing weak ref";
          }
        }
      }
    } else {
      break;
    }
  }
}

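// Run any pending empty checkpoint. Unlike the weak-ref-access variant above, this form is
// intended for mutex slow paths and does not exempt a condition-variable mutex from the
// held-mutex checks.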
inline void Thread::CheckEmptyCheckpointFromMutex() {
  DCHECK_EQ(Thread::Current(), this);
  for (;;) {
    if (ReadFlag(ThreadFlag::kEmptyCheckpointRequest)) {
      RunEmptyCheckpoint();
    } else {
      break;
    }
  }
}

inline ThreadState Thread::SetState(ThreadState new_state) {
  // Should only be used to change between suspended states.
  // Cannot use this code to change into or from Runnable as changing to Runnable should
  // fail if the `ThreadFlag::kSuspendRequest` is set and changing from Runnable might
  // miss passing an active suspend barrier.
  DCHECK_NE(new_state, ThreadState::kRunnable);
  if (kIsDebugBuild && this != Thread::Current()) {
    std::string name;
    GetThreadName(name);
    LOG(FATAL) << "Thread \"" << name << "\"(" << this << " != Thread::Current()="
               << Thread::Current() << ") changing state to " << new_state;
  }

  while (true) {
    StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
    CHECK_NE(old_state_and_flags.GetState(), ThreadState::kRunnable)
        << new_state << " " << *this << " " << *Thread::Current();
    StateAndFlags new_state_and_flags = old_state_and_flags.WithState(new_state);
    bool done =
        tls32_.state_and_flags.CompareAndSetWeakRelaxed(old_state_and_flags.GetValue(),
                                                        new_state_and_flags.GetValue());
    if (done) {
      return static_cast<ThreadState>(old_state_and_flags.GetState());
    }
  }
}

inline bool Thread::IsThreadSuspensionAllowable() const {
  if (tls32_.no_thread_suspension != 0) {
    return false;
  }
  for (int i = kLockLevelCount - 1; i >= 0; --i) {
    if (i != kMutatorLock &&
        i != kUserCodeSuspensionLock &&
        GetHeldMutex(static_cast<LockLevel>(i)) != nullptr) {
      return false;
    }
  }
  // Thread safety analysis cannot see that GetHeldMutex(...) or AssertHeld means we hold the
  // mutex, so we need this hack.
  auto is_suspending_for_user_code = [this]() NO_THREAD_SAFETY_ANALYSIS {
    return tls32_.user_code_suspend_count != 0;
  };
  if (GetHeldMutex(kUserCodeSuspensionLock) != nullptr && is_suspending_for_user_code()) {
    return false;
  }
  return true;
}

inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
  if (kIsDebugBuild) {
    if (gAborting == 0) {
      CHECK_EQ(0u, tls32_.no_thread_suspension) << tlsPtr_.last_no_thread_suspension_cause;
    }
    if (check_locks) {
      bool bad_mutexes_held = false;
      for (int i = kLockLevelCount - 1; i >= 0; --i) {
        // We expect no locks except the mutator lock. The user code suspension lock is OK as long
        // as we aren't going to be held suspended due to SuspendReason::kForUserCode.
        if (i != kMutatorLock && i != kUserCodeSuspensionLock) {
          BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
          if (held_mutex != nullptr) {
            LOG(ERROR) << "holding \"" << held_mutex->GetName()
                       << "\" at point where thread suspension is expected";
            bad_mutexes_held = true;
          }
        }
      }
      // Make sure that if we hold the user_code_suspension_lock_ we aren't suspending due to
      // user_code_suspend_count, which would prevent the thread from ever waking up. Thread
      // safety analysis cannot see that GetHeldMutex(...) or AssertHeld means we hold the mutex,
      // so we need this hack.
      auto is_suspending_for_user_code = [this]() NO_THREAD_SAFETY_ANALYSIS {
        return tls32_.user_code_suspend_count != 0;
      };
      if (GetHeldMutex(kUserCodeSuspensionLock) != nullptr && is_suspending_for_user_code()) {
        LOG(ERROR) << "suspending due to user-code while holding \""
                   << Locks::user_code_suspension_lock_->GetName() << "\"! Thread would never "
                   << "wake up.";
        bad_mutexes_held = true;
      }
      if (gAborting == 0) {
        CHECK(!bad_mutexes_held);
      }
    }
  }
}

inline void Thread::TransitionToSuspendedAndRunCheckpoints(ThreadState new_state) {
  DCHECK_NE(new_state, ThreadState::kRunnable);
  while (true) {
    StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
    DCHECK_EQ(old_state_and_flags.GetState(), ThreadState::kRunnable);
    if (UNLIKELY(old_state_and_flags.IsFlagSet(ThreadFlag::kCheckpointRequest))) {
      IncrementStatsCounter(&checkpoint_count_);
      RunCheckpointFunction();
      continue;
    }
    if (UNLIKELY(old_state_and_flags.IsFlagSet(ThreadFlag::kEmptyCheckpointRequest))) {
      RunEmptyCheckpoint();
      continue;
    }
    // Change the state but keep the current flags (kCheckpointRequest is clear).
    DCHECK(!old_state_and_flags.IsFlagSet(ThreadFlag::kCheckpointRequest));
    DCHECK(!old_state_and_flags.IsFlagSet(ThreadFlag::kEmptyCheckpointRequest));
    StateAndFlags new_state_and_flags = old_state_and_flags.WithState(new_state);

    // CAS the value, ensuring that prior memory operations are visible to any thread
    // that observes that we are suspended.
    bool done =
        tls32_.state_and_flags.CompareAndSetWeakRelease(old_state_and_flags.GetValue(),
                                                        new_state_and_flags.GetValue());
    if (LIKELY(done)) {
      IncrementStatsCounter(&suspended_count_);
      break;
    }
  }
}

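// After transitioning to a suspended state, pass any active suspend barriers so that the
// suspending thread(s) are not left waiting on us.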
inline void Thread::CheckActiveSuspendBarriers() {
  DCHECK_NE(GetState(), ThreadState::kRunnable);
  while (true) {
    StateAndFlags state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
    if (LIKELY(!state_and_flags.IsFlagSet(ThreadFlag::kCheckpointRequest) &&
               !state_and_flags.IsFlagSet(ThreadFlag::kEmptyCheckpointRequest) &&
               !state_and_flags.IsFlagSet(ThreadFlag::kActiveSuspendBarrier))) {
      break;
    } else if (state_and_flags.IsFlagSet(ThreadFlag::kActiveSuspendBarrier)) {
      PassActiveSuspendBarriers();
    } else {
      // Impossible
      LOG(FATAL) << "Fatal, thread transitioned into suspended without running the checkpoint";
    }
  }
}

inline void Thread::CheckBarrierInactive(WrappedSuspend1Barrier* suspend1_barrier) {
  for (WrappedSuspend1Barrier* w = tlsPtr_.active_suspend1_barriers; w != nullptr; w = w->next_) {
    CHECK_EQ(w->magic_, WrappedSuspend1Barrier::kMagic)
        << "first = " << tlsPtr_.active_suspend1_barriers << " current = " << w
        << " next = " << w->next_;
    CHECK_NE(w, suspend1_barrier);
  }
}

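// Push a single-thread suspend barrier onto the front of this thread's singly linked list of
// active suspend1 barriers.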
inline void Thread::AddSuspend1Barrier(WrappedSuspend1Barrier* suspend1_barrier) {
  if (tlsPtr_.active_suspend1_barriers != nullptr) {
    CHECK_EQ(tlsPtr_.active_suspend1_barriers->magic_, WrappedSuspend1Barrier::kMagic)
        << "first = " << tlsPtr_.active_suspend1_barriers;
  }
  CHECK_EQ(suspend1_barrier->magic_, WrappedSuspend1Barrier::kMagic);
  suspend1_barrier->next_ = tlsPtr_.active_suspend1_barriers;
  tlsPtr_.active_suspend1_barriers = suspend1_barrier;
}

inline void Thread::RemoveFirstSuspend1Barrier(WrappedSuspend1Barrier* suspend1_barrier) {
  DCHECK_EQ(tlsPtr_.active_suspend1_barriers, suspend1_barrier);
  tlsPtr_.active_suspend1_barriers = tlsPtr_.active_suspend1_barriers->next_;
}

inline void Thread::RemoveSuspend1Barrier(WrappedSuspend1Barrier* barrier) {
  // `barrier` should be in the list. If not, we will get a SIGSEGV with fault address of 4 or 8.
  WrappedSuspend1Barrier** last = &tlsPtr_.active_suspend1_barriers;
  while (*last != barrier) {
    last = &((*last)->next_);
  }
  *last = (*last)->next_;
}

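// Returns true if there is any suspend barrier (a suspend-all barrier or at least one suspend1
// barrier) that this thread still needs to pass.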
inline bool Thread::HasActiveSuspendBarrier() {
  return tlsPtr_.active_suspend1_barriers != nullptr ||
         tlsPtr_.active_suspendall_barrier != nullptr;
}

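// Transition out of Runnable into `new_state`, running any pending checkpoints and passing any
// active suspend barriers on the way. Typically entered via RAII helpers such as
// ScopedThreadSuspension rather than called directly.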
inline void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) {
  // Note: JNI stubs inline a fast path of this method that transitions to suspended if
  // there are no flags set and then clears the `held_mutexes[kMutatorLock]` (this comes
  // from a specialized `BaseMutex::RegisterAsLockedImpl(., kMutatorLock)` inlined from
  // the `GetMutatorLock()->TransitionFromRunnableToSuspended(this)` below).
  // Therefore any code added here (other than debug build assertions) should be gated
  // on some flag being set, so that the JNI stub can take the slow path to get here.
  AssertThreadSuspensionIsAllowable();
  PoisonObjectPointersIfDebug();
  DCHECK_EQ(this, Thread::Current());
  // Change to non-runnable state, thereby appearing suspended to the system.
  TransitionToSuspendedAndRunCheckpoints(new_state);
  // Mark the release of the share of the mutator lock.
  GetMutatorLock()->TransitionFromRunnableToSuspended(this);
  // Once suspended - check the active suspend barrier flag
  CheckActiveSuspendBarriers();
}

inline ThreadState Thread::TransitionFromSuspendedToRunnable(bool fail_on_suspend_req) {
  // Note: JNI stubs inline a fast path of this method that transitions to Runnable if
  // there are no flags set and then stores the mutator lock to `held_mutexes[kMutatorLock]`
  // (this comes from a specialized `BaseMutex::RegisterAsUnlockedImpl(., kMutatorLock)`
  // inlined from the `GetMutatorLock()->TransitionFromSuspendedToRunnable(this)` below).
  // Therefore any code added here (other than debug build assertions) should be gated
  // on some flag being set, so that the JNI stub can take the slow path to get here.
  DCHECK(this == Current());
  StateAndFlags old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
  ThreadState old_state = old_state_and_flags.GetState();
  DCHECK_NE(old_state, ThreadState::kRunnable);
  while (true) {
    DCHECK(!old_state_and_flags.IsFlagSet(ThreadFlag::kSuspensionImmune));
    GetMutatorLock()->AssertNotHeld(this);  // Otherwise we starve GC.
    // Optimize for the return from native code case - this is the fast path.
    // Atomically change from suspended to runnable if no suspend request pending.
    constexpr uint32_t kCheckedFlags =
        SuspendOrCheckpointRequestFlags() |
        enum_cast<uint32_t>(ThreadFlag::kActiveSuspendBarrier) |
        FlipFunctionFlags();
    if (LIKELY(!old_state_and_flags.IsAnyOfFlagsSet(kCheckedFlags))) {
      // CAS the value with a memory barrier.
      StateAndFlags new_state_and_flags = old_state_and_flags.WithState(ThreadState::kRunnable);
      if (LIKELY(tls32_.state_and_flags.CompareAndSetWeakAcquire(old_state_and_flags.GetValue(),
                                                                 new_state_and_flags.GetValue()))) {
        // Mark the acquisition of a share of the mutator lock.
        GetMutatorLock()->TransitionFromSuspendedToRunnable(this);
        break;
      }
    } else if (old_state_and_flags.IsFlagSet(ThreadFlag::kActiveSuspendBarrier)) {
      PassActiveSuspendBarriers();
    } else if (UNLIKELY(old_state_and_flags.IsFlagSet(ThreadFlag::kCheckpointRequest) ||
                        old_state_and_flags.IsFlagSet(ThreadFlag::kEmptyCheckpointRequest))) {
      // Checkpoint flags should not be set while in suspended state.
      static_assert(static_cast<std::underlying_type_t<ThreadState>>(ThreadState::kRunnable) == 0u);
      LOG(FATAL) << "Transitioning to Runnable with checkpoint flag,"
                 // Note: Keeping unused flags. If they are set, it points to memory corruption.
                 << " flags=" << old_state_and_flags.WithState(ThreadState::kRunnable).GetValue()
                 << " state=" << old_state_and_flags.GetState();
    } else if (old_state_and_flags.IsFlagSet(ThreadFlag::kSuspendRequest)) {
      auto fake_mutator_locker = []() SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
          NO_THREAD_SAFETY_ANALYSIS {};
      if (fail_on_suspend_req) {
        // Should get here EXTREMELY rarely.
        fake_mutator_locker();  // We lie to make thread-safety analysis mostly work. See thread.h.
        return ThreadState::kInvalidState;
      }
      // Wait while our suspend count is non-zero.

      // We pass null to the MutexLock as we may be in a situation where the
      // runtime is shutting down. Guarding ourselves from that situation
      // requires us to take the shutdown lock, which is undesirable here.
      Thread* thread_to_pass = nullptr;
      if (kIsDebugBuild && !IsDaemon()) {
        // We know we can make our debug locking checks on non-daemon threads,
        // so re-enable them on debug builds.
        thread_to_pass = this;
      }
      MutexLock mu(thread_to_pass, *Locks::thread_suspend_count_lock_);
      // Reload state and flags after locking the mutex.
      old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
      DCHECK_EQ(old_state, old_state_and_flags.GetState());
      while (old_state_and_flags.IsFlagSet(ThreadFlag::kSuspendRequest)) {
        // Re-check when Thread::resume_cond_ is notified.
        Thread::resume_cond_->Wait(thread_to_pass);
        // Reload state and flags after waiting.
        old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
        DCHECK_EQ(old_state, old_state_and_flags.GetState());
      }
      DCHECK_EQ(GetSuspendCount(), 0);
    } else if (UNLIKELY(old_state_and_flags.IsFlagSet(ThreadFlag::kRunningFlipFunction))) {
      DCHECK(!old_state_and_flags.IsFlagSet(ThreadFlag::kPendingFlipFunction));
      // Do this before transitioning to runnable, both because we shouldn't wait in a runnable
      // state, and so that the thread running the flip function can DCHECK we're not runnable.
      WaitForFlipFunction(this);
    } else if (old_state_and_flags.IsFlagSet(ThreadFlag::kPendingFlipFunction)) {
      // Logically acquire mutator lock in shared mode.
      DCHECK(!old_state_and_flags.IsFlagSet(ThreadFlag::kRunningFlipFunction));
      if (EnsureFlipFunctionStarted(this, this, old_state_and_flags)) {
        break;
      }
    }
    // Reload state and flags.
    old_state_and_flags = GetStateAndFlags(std::memory_order_relaxed);
    DCHECK_EQ(old_state, old_state_and_flags.GetState());
  }
  DCHECK_EQ(this->GetState(), ThreadState::kRunnable);
  return static_cast<ThreadState>(old_state);
}

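// Bump-allocate `bytes` from the thread-local allocation buffer (TLAB). The caller must have
// ensured that enough TLAB space remains, e.g. via TlabSize().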
inline mirror::Object* Thread::AllocTlab(size_t bytes) {
  DCHECK_GE(TlabSize(), bytes);
  ++tlsPtr_.thread_local_objects;
  mirror::Object* ret = reinterpret_cast<mirror::Object*>(tlsPtr_.thread_local_pos);
  tlsPtr_.thread_local_pos += bytes;
  return ret;
}

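// Try to push `obj` onto the thread-local segment of the allocation stack. Returns false if the
// segment is full, in which case the caller must handle the overflow.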
inline bool Thread::PushOnThreadLocalAllocationStack(mirror::Object* obj) {
  DCHECK_LE(tlsPtr_.thread_local_alloc_stack_top, tlsPtr_.thread_local_alloc_stack_end);
  if (tlsPtr_.thread_local_alloc_stack_top < tlsPtr_.thread_local_alloc_stack_end) {
    // There's room.
    DCHECK_LE(reinterpret_cast<uint8_t*>(tlsPtr_.thread_local_alloc_stack_top) +
                  sizeof(StackReference<mirror::Object>),
              reinterpret_cast<uint8_t*>(tlsPtr_.thread_local_alloc_stack_end));
    DCHECK(tlsPtr_.thread_local_alloc_stack_top->AsMirrorPtr() == nullptr);
    tlsPtr_.thread_local_alloc_stack_top->Assign(obj);
    ++tlsPtr_.thread_local_alloc_stack_top;
    return true;
  }
  return false;
}

inline bool Thread::GetWeakRefAccessEnabled() const {
  DCHECK(gUseReadBarrier);
  DCHECK(this == Thread::Current());
  WeakRefAccessState s = tls32_.weak_ref_access_enabled.load(std::memory_order_relaxed);
  if (LIKELY(s == WeakRefAccessState::kVisiblyEnabled)) {
    return true;
  }
  s = tls32_.weak_ref_access_enabled.load(std::memory_order_acquire);
  if (s == WeakRefAccessState::kVisiblyEnabled) {
    return true;
  } else if (s == WeakRefAccessState::kDisabled) {
    return false;
  }
  DCHECK(s == WeakRefAccessState::kEnabled)
      << "state = " << static_cast<std::underlying_type_t<WeakRefAccessState>>(s);
  // The state is only changed back to DISABLED during a checkpoint. Thus no other thread can
  // change the value concurrently here. No other thread reads the value we store here, so there
  // is no need for a release store.
  tls32_.weak_ref_access_enabled.store(WeakRefAccessState::kVisiblyEnabled,
                                       std::memory_order_relaxed);
  return true;
}

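// Install a new thread-local allocation stack segment spanning [start, end).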
inline void Thread::SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
                                                  StackReference<mirror::Object>* end) {
  DCHECK(Thread::Current() == this) << "Should be called by self";
  DCHECK(start != nullptr);
  DCHECK(end != nullptr);
  DCHECK_ALIGNED(start, sizeof(StackReference<mirror::Object>));
  DCHECK_ALIGNED(end, sizeof(StackReference<mirror::Object>));
  DCHECK_LT(start, end);
  tlsPtr_.thread_local_alloc_stack_end = end;
  tlsPtr_.thread_local_alloc_stack_top = start;
}

inline void Thread::RevokeThreadLocalAllocationStack() {
  if (kIsDebugBuild) {
    // Note: self is not necessarily equal to this thread since thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(this == self || GetState() != ThreadState::kRunnable)
        << GetState() << " thread " << this << " self " << self;
  }
  tlsPtr_.thread_local_alloc_stack_end = nullptr;
  tlsPtr_.thread_local_alloc_stack_top = nullptr;
}

inline void Thread::PoisonObjectPointersIfDebug() {
  if (kObjPtrPoisoning) {
    Thread::Current()->PoisonObjectPointers();
  }
}

inline void Thread::IncrementSuspendCount(Thread* self,
                                          AtomicInteger* suspendall_barrier,
                                          WrappedSuspend1Barrier* suspend1_barrier,
                                          SuspendReason reason) {
  if (kIsDebugBuild) {
    Locks::thread_suspend_count_lock_->AssertHeld(self);
    if (this != self) {
      Locks::thread_list_lock_->AssertHeld(self);
    }
  }
  if (UNLIKELY(reason == SuspendReason::kForUserCode)) {
    Locks::user_code_suspension_lock_->AssertHeld(self);
  }

  uint32_t flags = enum_cast<uint32_t>(ThreadFlag::kSuspendRequest);
  if (suspendall_barrier != nullptr) {
    DCHECK(suspend1_barrier == nullptr);
    DCHECK(tlsPtr_.active_suspendall_barrier == nullptr);
    tlsPtr_.active_suspendall_barrier = suspendall_barrier;
    flags |= enum_cast<uint32_t>(ThreadFlag::kActiveSuspendBarrier);
  } else if (suspend1_barrier != nullptr) {
    AddSuspend1Barrier(suspend1_barrier);
    flags |= enum_cast<uint32_t>(ThreadFlag::kActiveSuspendBarrier);
  }

  ++tls32_.suspend_count;
  if (reason == SuspendReason::kForUserCode) {
    ++tls32_.user_code_suspend_count;
  }

  // Two bits might be set simultaneously.
  tls32_.state_and_flags.fetch_or(flags, std::memory_order_release);
  TriggerSuspend();
}

inline void Thread::IncrementSuspendCount(Thread* self) {
  IncrementSuspendCount(self, nullptr, nullptr, SuspendReason::kInternal);
}

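// Decrement the suspend count (and, if requested, the user-code suspend count) and clear the
// kSuspendRequest flag once the count drops to zero.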
inline void Thread::DecrementSuspendCount(Thread* self, bool for_user_code) {
  DCHECK(ReadFlag(ThreadFlag::kSuspendRequest));
  Locks::thread_suspend_count_lock_->AssertHeld(self);
  if (UNLIKELY(tls32_.suspend_count <= 0)) {
    UnsafeLogFatalForSuspendCount(self, this);
    UNREACHABLE();
  }
  if (for_user_code) {
    Locks::user_code_suspension_lock_->AssertHeld(self);
    if (UNLIKELY(tls32_.user_code_suspend_count <= 0)) {
      LOG(ERROR) << "user_code_suspend_count incorrect";
      UnsafeLogFatalForSuspendCount(self, this);
      UNREACHABLE();
    }
    --tls32_.user_code_suspend_count;
  }

  --tls32_.suspend_count;

  if (tls32_.suspend_count == 0) {
    AtomicClearFlag(ThreadFlag::kSuspendRequest, std::memory_order_release);
  }
}

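// Push `new_top_frame` onto this thread's managed stack as the new top interpreter shadow frame.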
inline ShadowFrame* Thread::PushShadowFrame(ShadowFrame* new_top_frame) {
  new_top_frame->CheckConsistentVRegs();
  return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
}

inline ShadowFrame* Thread::PopShadowFrame() {
  return tlsPtr_.managed_stack.PopShadowFrame();
}

inline uint8_t* Thread::GetStackEndForInterpreter(bool implicit_overflow_check) const {
  uint8_t* end = tlsPtr_.stack_end + (implicit_overflow_check
                                          ? GetStackOverflowReservedBytes(kRuntimeISA)
                                          : 0);
  if (kIsDebugBuild) {
    // In a debuggable build, but especially under ASAN, the access-checks interpreter has a
    // potentially humongous stack size. We don't want to take too much of the stack regularly,
    // so do not increase the regular reserved size (for compiled code etc) and only report the
    // virtually smaller stack to the interpreter here.
    end += GetStackOverflowReservedBytes(kRuntimeISA);
  }
  return end;
}

inline void Thread::ResetDefaultStackEnd() {
  // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
  // to throw a StackOverflowError.
  tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
}

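// Register `tef` so that it is flagged when this thread exits. The flag is linked into a doubly
// linked list headed at tlsPtr_.thread_exit_flags.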
inline void Thread::NotifyOnThreadExit(ThreadExitFlag* tef) {
  DCHECK_EQ(tef->exited_, false);
  DCHECK(tlsPtr_.thread_exit_flags == nullptr || !tlsPtr_.thread_exit_flags->exited_);
  tef->next_ = tlsPtr_.thread_exit_flags;
  tlsPtr_.thread_exit_flags = tef;
  if (tef->next_ != nullptr) {
    DCHECK(!tef->next_->HasExited());
    tef->next_->prev_ = tef;
  }
  tef->prev_ = nullptr;
}

inline void Thread::UnregisterThreadExitFlag(ThreadExitFlag* tef) {
  if (tef->HasExited()) {
    // List is no longer used; each client will deallocate its own ThreadExitFlag.
    return;
  }
  DCHECK(IsRegistered(tef));
  // Remove tef from the list.
  if (tef->next_ != nullptr) {
    tef->next_->prev_ = tef->prev_;
  }
  if (tef->prev_ == nullptr) {
    DCHECK_EQ(tlsPtr_.thread_exit_flags, tef);
    tlsPtr_.thread_exit_flags = tef->next_;
  } else {
    DCHECK_NE(tlsPtr_.thread_exit_flags, tef);
    tef->prev_->next_ = tef->next_;
  }
  DCHECK(tlsPtr_.thread_exit_flags == nullptr || tlsPtr_.thread_exit_flags->prev_ == nullptr);
}

inline void Thread::DCheckUnregisteredEverywhere(ThreadExitFlag* first, ThreadExitFlag* last) {
  if (!kIsDebugBuild) {
    return;
  }
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::thread_list_lock_);
  Runtime::Current()->GetThreadList()->ForEach([&](Thread* t) REQUIRES(Locks::thread_list_lock_) {
    for (ThreadExitFlag* tef = t->tlsPtr_.thread_exit_flags; tef != nullptr; tef = tef->next_) {
      CHECK(tef < first || tef > last)
          << "tef = " << std::hex << tef << " first = " << first << std::dec;
    }
    // Also perform a minimal consistency check on each list.
    ThreadExitFlag* flags = t->tlsPtr_.thread_exit_flags;
    CHECK(flags == nullptr || flags->prev_ == nullptr);
  });
}

inline bool Thread::IsRegistered(ThreadExitFlag* query_tef) {
  for (ThreadExitFlag* tef = tlsPtr_.thread_exit_flags; tef != nullptr; tef = tef->next_) {
    if (tef == query_tef) {
      return true;
    }
  }
  return false;
}

inline void Thread::DisallowPreMonitorMutexes() {
  if (kIsDebugBuild) {
    CHECK(this == Thread::Current());
    CHECK(GetHeldMutex(kMonitorLock) == nullptr);
    // Pretend we hold a kMonitorLock level mutex to detect disallowed mutex
    // acquisitions by checkpoint Run() methods. We don't normally register or thus check
    // kMonitorLock level mutexes, but this is an exception.
    Mutex* ph = cp_placeholder_mutex_.load(std::memory_order_acquire);
    if (UNLIKELY(ph == nullptr)) {
      Mutex* new_ph = new Mutex("checkpoint placeholder mutex", kMonitorLock);
      if (LIKELY(cp_placeholder_mutex_.compare_exchange_strong(ph, new_ph))) {
        ph = new_ph;
      } else {
        // `ph` now has the value set by another thread.
        delete new_ph;
      }
    }
    SetHeldMutex(kMonitorLock, ph);
  }
}

// Undo the effect of the previous call. Again only invoked by the thread itself.
inline void Thread::AllowPreMonitorMutexes() {
  if (kIsDebugBuild) {
    CHECK_EQ(GetHeldMutex(kMonitorLock), cp_placeholder_mutex_.load(std::memory_order_relaxed));
    SetHeldMutex(kMonitorLock, nullptr);
  }
}

}  // namespace art

#endif  // ART_RUNTIME_THREAD_INL_H_