/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_INL_H_
#define ART_RUNTIME_THREAD_INL_H_

#include "thread.h"

#ifdef ART_TARGET_ANDROID
#include <bionic_tls.h>  // Access to our own TLS slot.
#endif

#include <pthread.h>

#include "base/casts.h"
#include "base/mutex-inl.h"
#include "gc/heap.h"
#include "jni_env_ext.h"
#include "obj_ptr.h"
#include "runtime.h"
#include "thread_pool.h"

namespace art {

// Quickly access the current thread from a JNIEnv.
static inline Thread* ThreadForEnv(JNIEnv* env) {
  JNIEnvExt* full_env(down_cast<JNIEnvExt*>(env));
  return full_env->self;
}

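// Returns the Thread* for the calling thread, or null if the runtime has not
// started or the thread is detached. Uses a dedicated TLS slot on Android
// targets and pthread_getspecific() elsewhere.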
inline Thread* Thread::Current() {
  // We rely on Thread::Current returning null for a detached thread, so it's not obvious
  // that we can replace this with a direct %fs access on x86.
  if (!is_started_) {
    return nullptr;
  } else {
#ifdef ART_TARGET_ANDROID
    void* thread = __get_tls()[TLS_SLOT_ART_THREAD_SELF];
#else
    void* thread = pthread_getspecific(Thread::pthread_key_self_);
#endif
    return reinterpret_cast<Thread*>(thread);
  }
}

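// Explicit suspend point: services any pending suspend or checkpoint request
// on the current thread.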
inline void Thread::AllowThreadSuspension() {
  DCHECK_EQ(Thread::Current(), this);
  if (UNLIKELY(TestAllFlags())) {
    CheckSuspend();
  }
  // Invalidate the current thread's object pointers (ObjPtr) to catch possible moving GC bugs due
  // to missing handles.
  PoisonObjectPointers();
}

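// Loops until all pending checkpoint functions, suspend requests, and empty
// checkpoints have been serviced.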
inline void Thread::CheckSuspend() {
  DCHECK_EQ(Thread::Current(), this);
  for (;;) {
    if (ReadFlag(kCheckpointRequest)) {
      RunCheckpointFunction();
    } else if (ReadFlag(kSuspendRequest)) {
      FullSuspendCheck();
    } else if (ReadFlag(kEmptyCheckpointRequest)) {
      RunEmptyCheckpoint();
    } else {
      break;
    }
  }
}

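// Services pending empty checkpoint requests during a weak-ref access. In
// debug builds, also checks that no unexpected mutexes are held at this point.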
inline void Thread::CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex) {
  Thread* self = Thread::Current();
  DCHECK_EQ(self, this);
  for (;;) {
    if (ReadFlag(kEmptyCheckpointRequest)) {
      RunEmptyCheckpoint();
      // Check that we hold only expected mutexes when accessing a weak ref.
      if (kIsDebugBuild) {
        for (int i = kLockLevelCount - 1; i >= 0; --i) {
          BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
          if (held_mutex != nullptr &&
              held_mutex != Locks::mutator_lock_ &&
              held_mutex != cond_var_mutex) {
            CHECK(Locks::IsExpectedOnWeakRefAccess(held_mutex))
                << "Holding unexpected mutex " << held_mutex->GetName()
                << " when accessing weak ref";
          }
        }
      }
    } else {
      break;
    }
  }
}

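// Services pending empty checkpoint requests; called when about to block on a
// mutex acquisition.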
inline void Thread::CheckEmptyCheckpointFromMutex() {
  DCHECK_EQ(Thread::Current(), this);
  for (;;) {
    if (ReadFlag(kEmptyCheckpointRequest)) {
      RunEmptyCheckpoint();
    } else {
      break;
    }
  }
}

inline ThreadState Thread::SetState(ThreadState new_state) {
  // Should only be used to change between suspended states.
  // Cannot use this code to change into or from Runnable as changing to Runnable should
  // fail if old_state_and_flags.suspend_request is true and changing from Runnable might
  // miss passing an active suspend barrier.
  DCHECK_NE(new_state, kRunnable);
  if (kIsDebugBuild && this != Thread::Current()) {
    std::string name;
    GetThreadName(name);
    LOG(FATAL) << "Thread \"" << name << "\"(" << this << " != Thread::Current()="
               << Thread::Current() << ") changing state to " << new_state;
  }
  union StateAndFlags old_state_and_flags;
  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
  CHECK_NE(old_state_and_flags.as_struct.state, kRunnable);
  tls32_.state_and_flags.as_struct.state = new_state;
  return static_cast<ThreadState>(old_state_and_flags.as_struct.state);
}

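// Suspension is allowable only if the no_thread_suspension counter is zero and
// no mutex other than the mutator lock is held.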
inline bool Thread::IsThreadSuspensionAllowable() const {
  if (tls32_.no_thread_suspension != 0) {
    return false;
  }
  for (int i = kLockLevelCount - 1; i >= 0; --i) {
    if (i != kMutatorLock && GetHeldMutex(static_cast<LockLevel>(i)) != nullptr) {
      return false;
    }
  }
  return true;
}

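// Debug-build assertion that suspension is allowable here; logs any offending
// mutexes before failing.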
inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
  if (kIsDebugBuild) {
    if (gAborting == 0) {
      CHECK_EQ(0u, tls32_.no_thread_suspension) << tlsPtr_.last_no_thread_suspension_cause;
    }
    if (check_locks) {
      bool bad_mutexes_held = false;
      for (int i = kLockLevelCount - 1; i >= 0; --i) {
        // We expect no locks except the mutator_lock_ or thread list suspend thread lock.
        if (i != kMutatorLock) {
          BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
          if (held_mutex != nullptr) {
            LOG(ERROR) << "holding \"" << held_mutex->GetName()
                       << "\" at point where thread suspension is expected";
            bad_mutexes_held = true;
          }
        }
      }
      if (gAborting == 0) {
        CHECK(!bad_mutexes_held);
      }
    }
  }
}

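// Atomically changes the state from kRunnable to new_state, first draining any
// pending checkpoint or empty-checkpoint requests. The weak release CAS is
// retried until it succeeds with all checkpoint flags clear.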
inline void Thread::TransitionToSuspendedAndRunCheckpoints(ThreadState new_state) {
  DCHECK_NE(new_state, kRunnable);
  DCHECK_EQ(GetState(), kRunnable);
  union StateAndFlags old_state_and_flags;
  union StateAndFlags new_state_and_flags;
  while (true) {
    old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
    if (UNLIKELY((old_state_and_flags.as_struct.flags & kCheckpointRequest) != 0)) {
      RunCheckpointFunction();
      continue;
    }
    if (UNLIKELY((old_state_and_flags.as_struct.flags & kEmptyCheckpointRequest) != 0)) {
      RunEmptyCheckpoint();
      continue;
    }
    // Change the state but keep the current flags (kCheckpointRequest is clear).
    DCHECK_EQ((old_state_and_flags.as_struct.flags & kCheckpointRequest), 0);
    DCHECK_EQ((old_state_and_flags.as_struct.flags & kEmptyCheckpointRequest), 0);
    new_state_and_flags.as_struct.flags = old_state_and_flags.as_struct.flags;
    new_state_and_flags.as_struct.state = new_state;

    // CAS the value with release memory ordering.
    bool done =
        tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakRelease(old_state_and_flags.as_int,
                                                                        new_state_and_flags.as_int);
    if (LIKELY(done)) {
      break;
    }
  }
}

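// Clears any suspend barriers registered against this thread now that it is no
// longer runnable, unblocking the threads waiting on them. The checkpoint
// flags must already be clear at this point.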
inline void Thread::PassActiveSuspendBarriers() {
  while (true) {
    uint16_t current_flags = tls32_.state_and_flags.as_struct.flags;
    if (LIKELY((current_flags &
                (kCheckpointRequest | kEmptyCheckpointRequest | kActiveSuspendBarrier)) == 0)) {
      break;
    } else if ((current_flags & kActiveSuspendBarrier) != 0) {
      PassActiveSuspendBarriers(this);
    } else {
      // Impossible: checkpoints must have been drained before suspending.
      LOG(FATAL) << "Thread transitioned into a suspended state without running its checkpoints";
    }
  }
}

inline void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) {
  AssertThreadSuspensionIsAllowable();
  PoisonObjectPointersIfDebug();
  DCHECK_EQ(this, Thread::Current());
  // Change to non-runnable state, thereby appearing suspended to the system.
  TransitionToSuspendedAndRunCheckpoints(new_state);
  // Mark the release of the share of the mutator_lock_.
  Locks::mutator_lock_->TransitionFromRunnableToSuspended(this);
  // Once suspended, check the active suspend barrier flag.
  PassActiveSuspendBarriers();
}

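// Moves the thread back to kRunnable, blocking while a suspend request is
// pending. The fast path, a single acquire CAS, covers the common return from
// native code. Returns the previous (suspended) state.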
inline ThreadState Thread::TransitionFromSuspendedToRunnable() {
  union StateAndFlags old_state_and_flags;
  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
  int16_t old_state = old_state_and_flags.as_struct.state;
  DCHECK_NE(static_cast<ThreadState>(old_state), kRunnable);
  do {
    Locks::mutator_lock_->AssertNotHeld(this);  // Otherwise we starve GC.
    old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
    DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
    if (LIKELY(old_state_and_flags.as_struct.flags == 0)) {
      // Optimize for the return from native code case - this is the fast path.
      // Atomically change from suspended to runnable if no suspend request pending.
      union StateAndFlags new_state_and_flags;
      new_state_and_flags.as_int = old_state_and_flags.as_int;
      new_state_and_flags.as_struct.state = kRunnable;
      // CAS the value with acquire memory ordering.
      if (LIKELY(tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakAcquire(
              old_state_and_flags.as_int,
              new_state_and_flags.as_int))) {
        // Mark the acquisition of a share of the mutator_lock_.
        Locks::mutator_lock_->TransitionFromSuspendedToRunnable(this);
        break;
      }
    } else if ((old_state_and_flags.as_struct.flags & kActiveSuspendBarrier) != 0) {
      PassActiveSuspendBarriers(this);
    } else if ((old_state_and_flags.as_struct.flags &
                (kCheckpointRequest | kEmptyCheckpointRequest)) != 0) {
      // Impossible: checkpoint flags are drained before a thread suspends.
      LOG(FATAL) << "Transitioning to runnable with checkpoint flag set,"
                 << " flags=" << old_state_and_flags.as_struct.flags
                 << " state=" << old_state_and_flags.as_struct.state;
    } else if ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
      // Wait while our suspend count is non-zero.

      // We pass null to the MutexLock as we may be in a situation where the
      // runtime is shutting down. Guarding ourselves from that situation
      // requires taking the shutdown lock, which is undesirable here.
      Thread* thread_to_pass = nullptr;
      if (kIsDebugBuild && !IsDaemon()) {
        // We know we can make our debug locking checks on non-daemon threads,
        // so re-enable them on debug builds.
        thread_to_pass = this;
      }
      MutexLock mu(thread_to_pass, *Locks::thread_suspend_count_lock_);
      ScopedTransitioningToRunnable scoped_transitioning_to_runnable(this);
      old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
      DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
      while ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
        // Re-check when Thread::resume_cond_ is notified.
        Thread::resume_cond_->Wait(thread_to_pass);
        old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
        DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
      }
      DCHECK_EQ(GetSuspendCount(), 0);
    }
  } while (true);
  // Run the flip function, if set.
  Closure* flip_func = GetFlipFunction();
  if (flip_func != nullptr) {
    flip_func->Run(this);
  }
  return static_cast<ThreadState>(old_state);
}

inline void Thread::VerifyStack() {
  if (kVerifyStack) {
    if (Runtime::Current()->GetHeap()->IsObjectValidationEnabled()) {
      VerifyStackImpl();
    }
  }
}

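// Bump-pointer allocation from the thread-local allocation buffer (TLAB).
// The caller must have checked that TlabSize() >= bytes.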
inline mirror::Object* Thread::AllocTlab(size_t bytes) {
  DCHECK_GE(TlabSize(), bytes);
  ++tlsPtr_.thread_local_objects;
  mirror::Object* ret = reinterpret_cast<mirror::Object*>(tlsPtr_.thread_local_pos);
  tlsPtr_.thread_local_pos += bytes;
  return ret;
}

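// Pushes obj onto this thread's segment of the shared allocation stack;
// returns false when the segment is full.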
inline bool Thread::PushOnThreadLocalAllocationStack(mirror::Object* obj) {
  DCHECK_LE(tlsPtr_.thread_local_alloc_stack_top, tlsPtr_.thread_local_alloc_stack_end);
  if (tlsPtr_.thread_local_alloc_stack_top < tlsPtr_.thread_local_alloc_stack_end) {
    // There's room.
    DCHECK_LE(reinterpret_cast<uint8_t*>(tlsPtr_.thread_local_alloc_stack_top) +
                  sizeof(StackReference<mirror::Object>),
              reinterpret_cast<uint8_t*>(tlsPtr_.thread_local_alloc_stack_end));
    DCHECK(tlsPtr_.thread_local_alloc_stack_top->AsMirrorPtr() == nullptr);
    tlsPtr_.thread_local_alloc_stack_top->Assign(obj);
    ++tlsPtr_.thread_local_alloc_stack_top;
    return true;
  }
  return false;
}

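// Installs [start, end) as this thread's segment of the shared allocation
// stack. Must be called by the thread itself with aligned, non-empty bounds.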
inline void Thread::SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
                                                  StackReference<mirror::Object>* end) {
  DCHECK(Thread::Current() == this) << "Should be called by self";
  DCHECK(start != nullptr);
  DCHECK(end != nullptr);
  DCHECK_ALIGNED(start, sizeof(StackReference<mirror::Object>));
  DCHECK_ALIGNED(end, sizeof(StackReference<mirror::Object>));
  DCHECK_LT(start, end);
  tlsPtr_.thread_local_alloc_stack_end = end;
  tlsPtr_.thread_local_alloc_stack_top = start;
}

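// Gives up this thread's allocation-stack segment. May be called from another
// thread, but only while this thread is suspended or performing GC.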
inline void Thread::RevokeThreadLocalAllocationStack() {
  if (kIsDebugBuild) {
    // Note: self is not necessarily equal to this thread since this thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(this == self || IsSuspended() || GetState() == kWaitingPerformingGc)
        << GetState() << " thread " << this << " self " << self;
  }
  tlsPtr_.thread_local_alloc_stack_end = nullptr;
  tlsPtr_.thread_local_alloc_stack_top = nullptr;
}

inline void Thread::PoisonObjectPointersIfDebug() {
  if (kObjPtrPoisoning) {
    Thread::Current()->PoisonObjectPointers();
  }
}

inline bool Thread::ModifySuspendCount(Thread* self,
                                       int delta,
                                       AtomicInteger* suspend_barrier,
                                       bool for_debugger) {
  if (delta > 0 && ((kUseReadBarrier && this != self) || suspend_barrier != nullptr)) {
    // When delta > 0 (requesting a suspend), ModifySuspendCountInternal() may fail either if
    // active_suspend_barriers is full or we are in the middle of a thread flip. Retry in a loop.
    while (true) {
      if (LIKELY(ModifySuspendCountInternal(self, delta, suspend_barrier, for_debugger))) {
        return true;
      } else {
        // Failure means the list of active_suspend_barriers is full or we are in the middle of a
        // thread flip. We should release the thread_suspend_count_lock_ (to avoid deadlock) and
        // wait until the target thread has executed either Thread::PassActiveSuspendBarriers() or
        // the flip function. Note that we cannot simply wait for the thread to change to a
        // suspended state, because it might need to run a checkpoint function before the state
        // change or resume from resume_cond_, which also needs thread_suspend_count_lock_.
        //
        // The list of active_suspend_barriers is very unlikely to be full since more than
        // kMaxSuspendBarriers threads would need to execute SuspendAllInternal() simultaneously,
        // and the target thread stays in kRunnable in the meantime.
        Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
        NanoSleep(100000);
        Locks::thread_suspend_count_lock_->ExclusiveLock(self);
      }
    }
  } else {
    return ModifySuspendCountInternal(self, delta, suspend_barrier, for_debugger);
  }
}

}  // namespace art

#endif  // ART_RUNTIME_THREAD_INL_H_