/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "thread_list.h"

#define ATRACE_TAG ATRACE_TAG_DALVIK

#include <cutils/trace.h>
#include <dirent.h>
#include <ScopedLocalRef.h>
#include <ScopedUtfChars.h>
#include <sys/types.h>
#include <unistd.h>

#include <sstream>

#include "base/histogram-inl.h"
#include "base/mutex.h"
#include "base/mutex-inl.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
#include "debugger.h"
#include "jni_internal.h"
#include "lock_word.h"
#include "monitor.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
#include "trace.h"
#include "well_known_classes.h"

namespace art {

static constexpr uint64_t kLongThreadSuspendThreshold = MsToNs(5);
static constexpr uint64_t kThreadSuspendTimeoutMs = 30 * 1000;  // 30s.
// Use 0 since we want to yield to prevent blocking for an unpredictable amount of time.
static constexpr useconds_t kThreadSuspendInitialSleepUs = 0;
static constexpr useconds_t kThreadSuspendMaxYieldUs = 3000;
static constexpr useconds_t kThreadSuspendMaxSleepUs = 5000;

ThreadList::ThreadList()
    : suspend_all_count_(0), debug_suspend_all_count_(0), unregistering_count_(0),
      suspend_all_historam_("suspend all histogram", 16, 64), long_suspend_(false) {
  CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1, 0U)));
}

ThreadList::~ThreadList() {
  // Detach the current thread if necessary. If we failed to start, there might not be any threads.
  // We need to detach the current thread here in case there's another thread waiting to join with
  // us.
  bool contains = false;
  {
    Thread* self = Thread::Current();
    MutexLock mu(self, *Locks::thread_list_lock_);
    contains = Contains(self);
  }
  if (contains) {
    Runtime::Current()->DetachCurrentThread();
  }
  WaitForOtherNonDaemonThreadsToExit();
  // TODO: there's an unaddressed race here where a thread may attach during shutdown, see
  //       Thread::Init.
  SuspendAllDaemonThreads();
}

bool ThreadList::Contains(Thread* thread) {
  return find(list_.begin(), list_.end(), thread) != list_.end();
}

bool ThreadList::Contains(pid_t tid) {
  for (const auto& thread : list_) {
    if (thread->GetTid() == tid) {
      return true;
    }
  }
  return false;
}

pid_t ThreadList::GetLockOwner() {
  return Locks::thread_list_lock_->GetExclusiveOwnerTid();
}

void ThreadList::DumpNativeStacks(std::ostream& os) {
  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
  for (const auto& thread : list_) {
    os << "DUMPING THREAD " << thread->GetTid() << "\n";
    DumpNativeStack(os, thread->GetTid(), "\t");
    os << "\n";
  }
}

void ThreadList::DumpForSigQuit(std::ostream& os) {
  {
    ScopedObjectAccess soa(Thread::Current());
    // Only print if we have samples.
    if (suspend_all_historam_.SampleSize() > 0) {
      Histogram<uint64_t>::CumulativeData data;
      suspend_all_historam_.CreateHistogram(&data);
      suspend_all_historam_.PrintConfidenceIntervals(os, 0.99, data);  // Dump time to suspend.
    }
  }
  Dump(os);
  DumpUnattachedThreads(os);
}

static void DumpUnattachedThread(std::ostream& os, pid_t tid) NO_THREAD_SAFETY_ANALYSIS {
  // TODO: No thread safety analysis as DumpState with a null thread won't access fields, should
  // refactor DumpState to avoid skipping analysis.
  Thread::DumpState(os, nullptr, tid);
  DumpKernelStack(os, tid, "  kernel: ", false);
  // TODO: Reenable this when the native code in system_server can handle it.
  // Currently "adb shell kill -3 `pid system_server`" will cause it to exit.
  if (false) {
    DumpNativeStack(os, tid, "  native: ");
  }
  os << "\n";
}

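// Walks /proc/self/task and dumps state for every tid that is not attached to the runtime, so the
// SIGQUIT dump also covers threads that never attached.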
void ThreadList::DumpUnattachedThreads(std::ostream& os) {
  DIR* d = opendir("/proc/self/task");
  if (!d) {
    return;
  }

  Thread* self = Thread::Current();
  dirent* e;
  while ((e = readdir(d)) != nullptr) {
    char* end;
    pid_t tid = strtol(e->d_name, &end, 10);
    if (!*end) {
      bool contains;
      {
        MutexLock mu(self, *Locks::thread_list_lock_);
        contains = Contains(tid);
      }
      if (!contains) {
        DumpUnattachedThread(os, tid);
      }
    }
  }
  closedir(d);
}

// Dump checkpoint timeout in milliseconds. Larger amount on the host, as dumping will invoke
// addr2line when available.
static constexpr uint32_t kDumpWaitTimeout = kIsTargetBuild ? 10000 : 20000;

// A closure used by Thread::Dump.
class DumpCheckpoint FINAL : public Closure {
 public:
  explicit DumpCheckpoint(std::ostream* os) : os_(os), barrier_(0) {}

  void Run(Thread* thread) OVERRIDE {
    // Note thread and self may not be equal if thread was already suspended at the point of the
    // request.
    Thread* self = Thread::Current();
    std::ostringstream local_os;
    {
      ScopedObjectAccess soa(self);
      thread->Dump(local_os);
    }
    local_os << "\n";
    {
      // Use the logging lock to ensure serialization when writing to the common ostream.
      MutexLock mu(self, *Locks::logging_lock_);
      *os_ << local_os.str();
    }
    if (thread->GetState() == kRunnable) {
      barrier_.Pass(self);
    }
  }

  void WaitForThreadsToRunThroughCheckpoint(size_t threads_running_checkpoint) {
    Thread* self = Thread::Current();
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    bool timed_out = barrier_.Increment(self, threads_running_checkpoint, kDumpWaitTimeout);
    if (timed_out) {
      // Avoid a recursive abort.
      LOG((kIsDebugBuild && (gAborting == 0)) ? FATAL : ERROR)
          << "Unexpected time out during dump checkpoint.";
    }
  }

 private:
  // The common stream that will accumulate all the dumps.
  std::ostream* const os_;
  // The barrier to be passed through and for the requestor to wait upon.
  Barrier barrier_;
};

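// Dumps every attached thread by running a DumpCheckpoint on each of them, then waits for the
// runnable threads to pass the barrier.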
void ThreadList::Dump(std::ostream& os) {
  {
    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
    os << "DALVIK THREADS (" << list_.size() << "):\n";
  }
  DumpCheckpoint checkpoint(&os);
  size_t threads_running_checkpoint = RunCheckpoint(&checkpoint);
  if (threads_running_checkpoint != 0) {
    checkpoint.WaitForThreadsToRunThroughCheckpoint(threads_running_checkpoint);
  }
}

void ThreadList::AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2) {
  MutexLock mu(self, *Locks::thread_list_lock_);
  MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
  for (const auto& thread : list_) {
    if (thread != ignore1 && thread != ignore2) {
      CHECK(thread->IsSuspended())
            << "\nUnsuspended thread: " << *thread << "\n"
            << "self: " << *Thread::Current();
    }
  }
}

#if HAVE_TIMED_RWLOCK
// Attempt to rectify locks so that we dump thread list with required locks before exiting.
NO_RETURN static void UnsafeLogFatalForThreadSuspendAllTimeout() {
  Runtime* runtime = Runtime::Current();
  std::ostringstream ss;
  ss << "Thread suspend timeout\n";
  Locks::mutator_lock_->Dump(ss);
  ss << "\n";
  runtime->GetThreadList()->Dump(ss);
  LOG(FATAL) << ss.str();
  exit(0);
}
#endif

// Unlike suspending all threads where we can wait to acquire the mutator_lock_, suspending an
// individual thread requires polling. delay_us is the requested sleep duration. If delay_us is 0
// then we use sched_yield instead of calling usleep.
static void ThreadSuspendSleep(useconds_t delay_us) {
  if (delay_us == 0) {
    sched_yield();
  } else {
    usleep(delay_us);
  }
}

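// Runs |checkpoint_function| on every thread. Runnable threads are asked to run the checkpoint
// themselves at their next suspend check; threads that are already suspended have their suspend
// count raised so the checkpoint can safely be run on their behalf, after which they are resumed.
// The checkpoint also runs on the calling thread. Returns the number of threads that will run the
// checkpoint on their own.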
size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  if (kDebugLocking && gAborting == 0) {
    CHECK_NE(self->GetState(), kRunnable);
  }

  std::vector<Thread*> suspended_count_modified_threads;
  size_t count = 0;
  {
    // Call a checkpoint function for each thread; threads which are suspended get their
    // checkpoint function run manually on their behalf.
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : list_) {
      if (thread != self) {
        while (true) {
          if (thread->RequestCheckpoint(checkpoint_function)) {
            // This thread will run its checkpoint some time in the near future.
            count++;
            break;
          } else {
            // We are probably suspended; try to make sure that we stay suspended.
            if (thread->GetState() == kRunnable) {
              // The thread switched back to runnable: spurious failure, try again.
              continue;
            }
            thread->ModifySuspendCount(self, +1, false);
            suspended_count_modified_threads.push_back(thread);
            break;
          }
        }
      }
    }
  }

  // Run the checkpoint on ourself while we wait for threads to suspend.
  checkpoint_function->Run(self);

  // Run the checkpoint on the suspended threads.
  for (const auto& thread : suspended_count_modified_threads) {
    if (!thread->IsSuspended()) {
      if (ATRACE_ENABLED()) {
        std::ostringstream oss;
        thread->ShortDump(oss);
        ATRACE_BEGIN((std::string("Waiting for suspension of thread ") + oss.str()).c_str());
      }
      // Busy wait until the thread is suspended.
      const uint64_t start_time = NanoTime();
      do {
        ThreadSuspendSleep(kThreadSuspendInitialSleepUs);
      } while (!thread->IsSuspended());
      const uint64_t total_delay = NanoTime() - start_time;
      // Shouldn't need to wait for longer than 1000 microseconds.
      constexpr uint64_t kLongWaitThreshold = MsToNs(1);
      ATRACE_END();
      if (UNLIKELY(total_delay > kLongWaitThreshold)) {
        LOG(WARNING) << "Long wait of " << PrettyDuration(total_delay) << " for "
            << *thread << " suspension!";
      }
    }
    // We know for sure that the thread is suspended at this point.
    checkpoint_function->Run(thread);
    {
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
      thread->ModifySuspendCount(self, -1, false);
    }
  }

  {
    // Imitate ResumeAll: threads may be waiting on Thread::resume_cond_ since we raised their
    // suspend counts. Now that the suspend counts are lowered we must do the broadcast.
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  return count;
}

// Request that a checkpoint function be run on all active (non-suspended)
// threads.  Returns the number of successful requests.
size_t ThreadList::RunCheckpointOnRunnableThreads(Closure* checkpoint_function) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  CHECK_NE(self->GetState(), kRunnable);

  size_t count = 0;
  {
    // Call a checkpoint function for each non-suspended thread.
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : list_) {
      if (thread != self) {
        if (thread->RequestCheckpoint(checkpoint_function)) {
          // This thread will run its checkpoint some time in the near future.
          count++;
        }
      }
    }
  }

  // Return the number of threads that will run the checkpoint function.
  return count;
}

// A checkpoint/suspend-all hybrid to switch thread roots from
// from-space to to-space refs. Used to synchronize threads at a point
// to mark the initiation of marking while maintaining the to-space
// invariant.
size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
                                   gc::collector::GarbageCollector* collector) {
  TimingLogger::ScopedTiming split("ThreadListFlip", collector->GetTimings());
  const uint64_t start_time = NanoTime();
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  CHECK_NE(self->GetState(), kRunnable);

  std::vector<Thread*> runnable_threads;
  std::vector<Thread*> other_threads;

  // Suspend all threads once.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend all state for attaching threads.
    ++suspend_all_count_;
    // Increment everybody's suspend count (except our own).
    for (const auto& thread : list_) {
      if (thread == self) {
        continue;
      }
      thread->ModifySuspendCount(self, +1, false);
    }
  }

  // Run the flip callback for the collector.
  Locks::mutator_lock_->ExclusiveLock(self);
  flip_callback->Run(self);
  Locks::mutator_lock_->ExclusiveUnlock(self);
  collector->RegisterPause(NanoTime() - start_time);

  // Resume runnable threads.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    --suspend_all_count_;
    for (const auto& thread : list_) {
      if (thread == self) {
        continue;
      }
      // Set the flip function for both runnable and suspended threads
      // because Thread::DumpState/DumpJavaStack() (invoked by a
      // checkpoint) may cause the flip function to be run for a
      // runnable/suspended thread before a runnable thread runs it
      // for itself or we run it for a suspended thread below.
      thread->SetFlipFunction(thread_flip_visitor);
      if (thread->IsSuspendedAtSuspendCheck()) {
        // The thread will resume right after the broadcast.
        thread->ModifySuspendCount(self, -1, false);
        runnable_threads.push_back(thread);
      } else {
        other_threads.push_back(thread);
      }
    }
    Thread::resume_cond_->Broadcast(self);
  }

  // Run the closure on the other threads and let them resume.
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    for (const auto& thread : other_threads) {
      Closure* flip_func = thread->GetFlipFunction();
      if (flip_func != nullptr) {
        flip_func->Run(thread);
      }
    }
    // Run it for self.
    thread_flip_visitor->Run(self);
  }

  // Resume other threads.
  {
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : other_threads) {
      thread->ModifySuspendCount(self, -1, false);
    }
    Thread::resume_cond_->Broadcast(self);
  }

  return runnable_threads.size() + other_threads.size() + 1;  // +1 for self.
}

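// Suspends all threads other than the caller: raises every other thread's suspend count, then
// blocks until the mutator lock can be held exclusively, i.e. until every runnable thread has
// reached a suspend point. Must be paired with a later call to ResumeAll. Passing
// long_suspend == true records that the threads may stay suspended for a long time, which stops
// a concurrently waiting SuspendAll from treating its wait for the mutator lock as a fatal
// timeout.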
void ThreadList::SuspendAll(const char* cause, bool long_suspend) {
  Thread* self = Thread::Current();

  if (self != nullptr) {
    VLOG(threads) << *self << " SuspendAll for " << cause << " starting...";
  } else {
    VLOG(threads) << "Thread[null] SuspendAll for " << cause << " starting...";
  }
  ATRACE_BEGIN("Suspending mutator threads");
  const uint64_t start_time = NanoTime();

  Locks::mutator_lock_->AssertNotHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  if (kDebugLocking && self != nullptr) {
    CHECK_NE(self->GetState(), kRunnable);
  }
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend all state for attaching threads.
    ++suspend_all_count_;
    // Increment everybody's suspend count (except our own).
    for (const auto& thread : list_) {
      if (thread == self) {
        continue;
      }
      VLOG(threads) << "requesting thread suspend: " << *thread;
      thread->ModifySuspendCount(self, +1, false);
    }
  }

  // Block on the mutator lock until all Runnable threads release their share of access.
#if HAVE_TIMED_RWLOCK
  while (true) {
    if (Locks::mutator_lock_->ExclusiveLockWithTimeout(self, kThreadSuspendTimeoutMs, 0)) {
      break;
    } else if (!long_suspend_) {
      // Reading long_suspend without the mutator lock is slightly racy; in some rare cases this
      // could result in a thread suspend timeout.
      // Timeout if we wait more than kThreadSuspendTimeoutMs milliseconds.
      UnsafeLogFatalForThreadSuspendAllTimeout();
    }
  }
#else
  Locks::mutator_lock_->ExclusiveLock(self);
#endif

  long_suspend_ = long_suspend;

  const uint64_t end_time = NanoTime();
  const uint64_t suspend_time = end_time - start_time;
  suspend_all_historam_.AdjustAndAddValue(suspend_time);
  if (suspend_time > kLongThreadSuspendThreshold) {
    LOG(WARNING) << "Suspending all threads took: " << PrettyDuration(suspend_time);
  }

  if (kDebugLocking) {
    // Debug check that all threads are suspended.
    AssertThreadsAreSuspended(self, self);
  }

  ATRACE_END();
  ATRACE_BEGIN((std::string("Mutator threads suspended for ") + cause).c_str());

  if (self != nullptr) {
    VLOG(threads) << *self << " SuspendAll complete";
  } else {
    VLOG(threads) << "Thread[null] SuspendAll complete";
  }
}

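// Undoes a previous SuspendAll: releases exclusive ownership of the mutator lock, decrements
// every other thread's suspend count and broadcasts on Thread::resume_cond_ so suspended threads
// can wake up.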
void ThreadList::ResumeAll() {
  Thread* self = Thread::Current();

  if (self != nullptr) {
    VLOG(threads) << *self << " ResumeAll starting";
  } else {
    VLOG(threads) << "Thread[null] ResumeAll starting";
  }

  ATRACE_END();
  ATRACE_BEGIN("Resuming mutator threads");

  if (kDebugLocking) {
    // Debug check that all threads are suspended.
    AssertThreadsAreSuspended(self, self);
  }

  long_suspend_ = false;

  Locks::mutator_lock_->ExclusiveUnlock(self);
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend all state for attaching threads.
    --suspend_all_count_;
    // Decrement the suspend counts for all threads.
    for (const auto& thread : list_) {
      if (thread == self) {
        continue;
      }
      thread->ModifySuspendCount(self, -1, false);
    }

    // Broadcast a notification to all suspended threads, some or all of
    // which may choose to wake up.  No need to wait for them.
    if (self != nullptr) {
      VLOG(threads) << *self << " ResumeAll waking others";
    } else {
      VLOG(threads) << "Thread[null] ResumeAll waking others";
    }
    Thread::resume_cond_->Broadcast(self);
  }
  ATRACE_END();

  if (self != nullptr) {
    VLOG(threads) << *self << " ResumeAll complete";
  } else {
    VLOG(threads) << "Thread[null] ResumeAll complete";
  }
}

void ThreadList::Resume(Thread* thread, bool for_debugger) {
  // This assumes there was an ATRACE_BEGIN when we suspended the thread.
  ATRACE_END();

  Thread* self = Thread::Current();
  DCHECK_NE(thread, self);
  VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") starting..."
      << (for_debugger ? " (debugger)" : "");

  {
    // To check Contains.
    MutexLock mu(self, *Locks::thread_list_lock_);
    // To check IsSuspended.
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    DCHECK(thread->IsSuspended());
    if (!Contains(thread)) {
      // We only expect threads within the thread list to have been suspended; otherwise we can't
      // stop such threads from deleting themselves.
      LOG(ERROR) << "Resume(" << reinterpret_cast<void*>(thread)
          << ") thread not within thread list";
      return;
    }
    thread->ModifySuspendCount(self, -1, for_debugger);
  }

  {
    VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") waking others";
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") complete";
}

static void ThreadSuspendByPeerWarning(Thread* self, LogSeverity severity, const char* message,
                                       jobject peer) {
  JNIEnvExt* env = self->GetJniEnv();
  ScopedLocalRef<jstring>
      scoped_name_string(env, (jstring)env->GetObjectField(
          peer, WellKnownClasses::java_lang_Thread_name));
  ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
  if (scoped_name_chars.c_str() == nullptr) {
    LOG(severity) << message << ": " << peer;
    env->ExceptionClear();
  } else {
    LOG(severity) << message << ": " << peer << ":" << scoped_name_chars.c_str();
  }
}

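// Suspends the thread whose java.lang.Thread peer is |peer|. Returns the suspended Thread*, or
// null if the thread could not be found or exited before it could be suspended; *timed_out is
// set to true if the attempt gave up after kThreadSuspendTimeoutMs.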
Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension,
                                        bool debug_suspension, bool* timed_out) {
  const uint64_t start_time = NanoTime();
  useconds_t sleep_us = kThreadSuspendInitialSleepUs;
  *timed_out = false;
  Thread* const self = Thread::Current();
  Thread* suspended_thread = nullptr;
  VLOG(threads) << "SuspendThreadByPeer starting";
  while (true) {
    Thread* thread;
    {
      // Note: this will transition to runnable and potentially suspend. We ensure only one thread
      // is requesting another suspend, to avoid deadlock, by requiring this function be called
      // holding Locks::thread_list_suspend_thread_lock_. It's important this thread suspend rather
      // than request thread suspension, to avoid potential cycles in threads requesting each
      // other's suspension.
      ScopedObjectAccess soa(self);
      MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
      thread = Thread::FromManagedThread(soa, peer);
      if (thread == nullptr) {
        if (suspended_thread != nullptr) {
          MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
          // If we incremented the suspend count but the thread reset its peer, we need to
          // re-decrement it since it is shutting down and may deadlock the runtime in
          // ThreadList::WaitForOtherNonDaemonThreadsToExit.
          suspended_thread->ModifySuspendCount(soa.Self(), -1, debug_suspension);
        }
        ThreadSuspendByPeerWarning(self, WARNING, "No such thread for suspend", peer);
        return nullptr;
      }
      if (!Contains(thread)) {
        CHECK(suspended_thread == nullptr);
        VLOG(threads) << "SuspendThreadByPeer failed for unattached thread: "
            << reinterpret_cast<void*>(thread);
        return nullptr;
      }
      VLOG(threads) << "SuspendThreadByPeer found thread: " << *thread;
      {
        MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
        if (request_suspension) {
          if (self->GetSuspendCount() > 0) {
            // We hold the suspend count lock but another thread is trying to suspend us. It's not
            // safe to try to suspend another thread in case we get a cycle. Start the loop again,
            // which will allow this thread to be suspended.
            continue;
          }
          CHECK(suspended_thread == nullptr);
          suspended_thread = thread;
          suspended_thread->ModifySuspendCount(self, +1, debug_suspension);
          request_suspension = false;
        } else {
          // If the caller isn't requesting suspension, a suspension should have already occurred.
          CHECK_GT(thread->GetSuspendCount(), 0);
        }
        // IsSuspended on the current thread will fail as the current thread is changed into
        // Runnable above. As the suspend count is now raised if this is the current thread
        // it will self suspend on transition to Runnable, making it hard to work with. It's simpler
        // to just explicitly handle the current thread in the callers to this code.
        CHECK_NE(thread, self) << "Attempt to suspend the current thread for the debugger";
        // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
        // count, or else we've waited and it has self suspended) or is the current thread, we're
        // done.
        if (thread->IsSuspended()) {
          VLOG(threads) << "SuspendThreadByPeer thread suspended: " << *thread;
          if (ATRACE_ENABLED()) {
            std::string name;
            thread->GetThreadName(name);
            ATRACE_BEGIN(StringPrintf("SuspendThreadByPeer suspended %s for peer=%p", name.c_str(),
                                      peer).c_str());
          }
          return thread;
        }
        const uint64_t total_delay = NanoTime() - start_time;
        if (total_delay >= MsToNs(kThreadSuspendTimeoutMs)) {
          ThreadSuspendByPeerWarning(self, FATAL, "Thread suspension timed out", peer);
          if (suspended_thread != nullptr) {
            CHECK_EQ(suspended_thread, thread);
            suspended_thread->ModifySuspendCount(soa.Self(), -1, debug_suspension);
          }
          *timed_out = true;
          return nullptr;
        } else if (sleep_us == 0 &&
            total_delay > static_cast<uint64_t>(kThreadSuspendMaxYieldUs) * 1000) {
          // We have spun for kThreadSuspendMaxYieldUs time, switch to sleeps to prevent
          // excessive CPU usage.
          sleep_us = kThreadSuspendMaxYieldUs / 2;
        }
      }
      // Release locks and come out of runnable state.
    }
    VLOG(threads) << "SuspendThreadByPeer waiting to allow thread chance to suspend";
    ThreadSuspendSleep(sleep_us);
    // This may stay at 0 if sleep_us == 0, but this is working as intended (WAI) since we want to
    // avoid using usleep at all if possible. This shouldn't be an issue since time to suspend
    // should always be small.
    sleep_us = std::min(sleep_us * 2, kThreadSuspendMaxSleepUs);
  }
}

static void ThreadSuspendByThreadIdWarning(LogSeverity severity, const char* message,
                                           uint32_t thread_id) {
  LOG(severity) << StringPrintf("%s: %d", message, thread_id);
}

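// Same as SuspendThreadByPeer, but identifies the target thread by its thin-lock thread id. Used
// when only the id is known, e.g. when inflating a monitor owned by another thread.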
Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension,
                                            bool* timed_out) {
  const uint64_t start_time = NanoTime();
  useconds_t sleep_us = kThreadSuspendInitialSleepUs;
  *timed_out = false;
  Thread* suspended_thread = nullptr;
  Thread* const self = Thread::Current();
  CHECK_NE(thread_id, kInvalidThreadId);
  VLOG(threads) << "SuspendThreadByThreadId starting";
  while (true) {
    {
      // Note: this will transition to runnable and potentially suspend. We ensure only one thread
      // is requesting another suspend, to avoid deadlock, by requiring this function be called
      // holding Locks::thread_list_suspend_thread_lock_. It's important this thread suspend rather
      // than request thread suspension, to avoid potential cycles in threads requesting each
      // other's suspension.
      ScopedObjectAccess soa(self);
      MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
      Thread* thread = nullptr;
      for (const auto& it : list_) {
        if (it->GetThreadId() == thread_id) {
          thread = it;
          break;
        }
      }
      if (thread == nullptr) {
        CHECK(suspended_thread == nullptr) << "Suspended thread " << suspended_thread
            << " no longer in thread list";
        // There's a race between inflating a lock and the owner giving up ownership and then
        // dying.
        ThreadSuspendByThreadIdWarning(WARNING, "No such thread id for suspend", thread_id);
        return nullptr;
      }
      VLOG(threads) << "SuspendThreadByThreadId found thread: " << *thread;
      DCHECK(Contains(thread));
      {
        MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
        if (suspended_thread == nullptr) {
          if (self->GetSuspendCount() > 0) {
            // We hold the suspend count lock but another thread is trying to suspend us. It's not
            // safe to try to suspend another thread in case we get a cycle. Start the loop again,
            // which will allow this thread to be suspended.
            continue;
          }
          thread->ModifySuspendCount(self, +1, debug_suspension);
          suspended_thread = thread;
        } else {
          CHECK_EQ(suspended_thread, thread);
          // If the caller isn't requesting suspension, a suspension should have already occurred.
          CHECK_GT(thread->GetSuspendCount(), 0);
        }
        // IsSuspended on the current thread will fail as the current thread is changed into
        // Runnable above. As the suspend count is now raised if this is the current thread
        // it will self suspend on transition to Runnable, making it hard to work with. It's simpler
        // to just explicitly handle the current thread in the callers to this code.
        CHECK_NE(thread, self) << "Attempt to suspend the current thread for the debugger";
        // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
        // count, or else we've waited and it has self suspended) or is the current thread, we're
        // done.
        if (thread->IsSuspended()) {
          if (ATRACE_ENABLED()) {
            std::string name;
            thread->GetThreadName(name);
            ATRACE_BEGIN(StringPrintf("SuspendThreadByThreadId suspended %s id=%d",
                                      name.c_str(), thread_id).c_str());
          }
          VLOG(threads) << "SuspendThreadByThreadId thread suspended: " << *thread;
          return thread;
        }
        const uint64_t total_delay = NanoTime() - start_time;
        if (total_delay >= MsToNs(kThreadSuspendTimeoutMs)) {
          ThreadSuspendByThreadIdWarning(WARNING, "Thread suspension timed out", thread_id);
          if (suspended_thread != nullptr) {
            thread->ModifySuspendCount(soa.Self(), -1, debug_suspension);
          }
          *timed_out = true;
          return nullptr;
        } else if (sleep_us == 0 &&
            total_delay > static_cast<uint64_t>(kThreadSuspendMaxYieldUs) * 1000) {
          // We have spun for kThreadSuspendMaxYieldUs time, switch to sleeps to prevent
          // excessive CPU usage.
          sleep_us = kThreadSuspendMaxYieldUs / 2;
        }
      }
      // Release locks and come out of runnable state.
    }
    VLOG(threads) << "SuspendThreadByThreadId waiting to allow thread chance to suspend";
    ThreadSuspendSleep(sleep_us);
    sleep_us = std::min(sleep_us * 2, kThreadSuspendMaxSleepUs);
  }
}

Thread* ThreadList::FindThreadByThreadId(uint32_t thin_lock_id) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::thread_list_lock_);
  for (const auto& thread : list_) {
    if (thread->GetThreadId() == thin_lock_id) {
      CHECK(thread == self || thread->IsSuspended());
      return thread;
    }
  }
  return nullptr;
}

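// Like SuspendAll, but leaves the JDWP debugger thread running and releases exclusive ownership
// of the mutator lock immediately after acquiring it: the debugger only needs the other threads
// parked, not exclusive mutator access.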
void ThreadList::SuspendAllForDebugger() {
  Thread* self = Thread::Current();
  Thread* debug_thread = Dbg::GetDebugThread();

  VLOG(threads) << *self << " SuspendAllForDebugger starting...";

  {
    MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
    {
      MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
      // Update global suspend all state for attaching threads.
      DCHECK_GE(suspend_all_count_, debug_suspend_all_count_);
      ++suspend_all_count_;
      ++debug_suspend_all_count_;
      // Increment everybody's suspend count (except our own).
      for (const auto& thread : list_) {
        if (thread == self || thread == debug_thread) {
          continue;
        }
        VLOG(threads) << "requesting thread suspend: " << *thread;
        thread->ModifySuspendCount(self, +1, true);
      }
    }
  }

  // Block on the mutator lock until all Runnable threads release their share of access then
  // immediately unlock again.
#if HAVE_TIMED_RWLOCK
  // Timeout if we wait more than 30 seconds.
  if (!Locks::mutator_lock_->ExclusiveLockWithTimeout(self, 30 * 1000, 0)) {
    UnsafeLogFatalForThreadSuspendAllTimeout();
  } else {
    Locks::mutator_lock_->ExclusiveUnlock(self);
  }
#else
  Locks::mutator_lock_->ExclusiveLock(self);
  Locks::mutator_lock_->ExclusiveUnlock(self);
#endif
  AssertThreadsAreSuspended(self, self, debug_thread);

  VLOG(threads) << *self << " SuspendAllForDebugger complete";
}

void ThreadList::SuspendSelfForDebugger() {
  Thread* const self = Thread::Current();
  self->SetReadyForDebugInvoke(true);

  // The debugger thread must not suspend itself due to debugger activity!
  Thread* debug_thread = Dbg::GetDebugThread();
  CHECK(self != debug_thread);
  CHECK_NE(self->GetState(), kRunnable);
  Locks::mutator_lock_->AssertNotHeld(self);

  // The debugger may have detached while we were executing an invoke request. In that case, we
  // must not suspend ourself.
  DebugInvokeReq* pReq = self->GetInvokeReq();
  const bool skip_thread_suspension = (pReq != nullptr && !Dbg::IsDebuggerActive());
  if (!skip_thread_suspension) {
    // Collisions with other suspends aren't really interesting. We want
    // to ensure that we're the only one fiddling with the suspend count
    // though.
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    self->ModifySuspendCount(self, +1, true);
    CHECK_GT(self->GetSuspendCount(), 0);

    VLOG(threads) << *self << " self-suspending (debugger)";
  } else {
    // We must no longer be subject to debugger suspension.
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    CHECK_EQ(self->GetDebugSuspendCount(), 0) << "Debugger detached without resuming us";

    VLOG(threads) << *self << " not self-suspending because debugger detached during invoke";
  }

  // If the debugger requested an invoke, we need to send the reply and clear the request.
  if (pReq != nullptr) {
    Dbg::FinishInvokeMethod(pReq);
    self->ClearDebugInvokeReq();
    pReq = nullptr;  // object has been deleted, clear it for safety.
  }

  // Tell JDWP that we've completed suspension. The JDWP thread can't
  // tell us to resume before we're fully asleep because we hold the
  // suspend count lock.
  Dbg::ClearWaitForEventThread();

  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    while (self->GetSuspendCount() != 0) {
      Thread::resume_cond_->Wait(self);
      if (self->GetSuspendCount() != 0) {
        // The condition was signaled but we're still suspended. This
        // can happen when we suspend then resume all threads to
        // update instrumentation or compute monitor info. This can
        // also happen if the debugger lets go while a SIGQUIT thread
        // dump event is pending (assuming SignalCatcher was resumed for
        // just long enough to try to grab the thread-suspend lock).
        VLOG(jdwp) << *self << " still suspended after undo "
                   << "(suspend count=" << self->GetSuspendCount() << ", "
                   << "debug suspend count=" << self->GetDebugSuspendCount() << ")";
      }
    }
    CHECK_EQ(self->GetSuspendCount(), 0);
  }

  self->SetReadyForDebugInvoke(false);
  VLOG(threads) << *self << " self-reviving (debugger)";
}

void ThreadList::ResumeAllForDebugger() {
  Thread* self = Thread::Current();
  Thread* debug_thread = Dbg::GetDebugThread();

  VLOG(threads) << *self << " ResumeAllForDebugger starting...";

  // Threads can't resume if we exclusively hold the mutator lock.
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);

  {
    MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
    {
      MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
      // Update global suspend all state for attaching threads.
      DCHECK_GE(suspend_all_count_, debug_suspend_all_count_);
      if (debug_suspend_all_count_ > 0) {
        --suspend_all_count_;
        --debug_suspend_all_count_;
      } else {
        // We've been asked to resume all threads without being asked to
        // suspend them all before. That may happen if a debugger tries
        // to resume some suspended threads (with suspend count == 1)
        // at once with a VirtualMachine.Resume command. Let's print a
        // warning.
        LOG(WARNING) << "Debugger attempted to resume all threads without "
                     << "having suspended them all before.";
      }
      // Decrement everybody's suspend count (except our own).
      for (const auto& thread : list_) {
        if (thread == self || thread == debug_thread) {
          continue;
        }
        if (thread->GetDebugSuspendCount() == 0) {
          // This thread may have been individually resumed with ThreadReference.Resume.
          continue;
        }
        VLOG(threads) << "requesting thread resume: " << *thread;
        thread->ModifySuspendCount(self, -1, true);
      }
    }
  }

  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << *self << " ResumeAllForDebugger complete";
}

void ThreadList::UndoDebuggerSuspensions() {
  Thread* self = Thread::Current();

  VLOG(threads) << *self << " UndoDebuggerSuspensions starting";

  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend all state for attaching threads.
    suspend_all_count_ -= debug_suspend_all_count_;
    debug_suspend_all_count_ = 0;
    // Update running threads.
    for (const auto& thread : list_) {
      if (thread == self || thread->GetDebugSuspendCount() == 0) {
        continue;
      }
      thread->ModifySuspendCount(self, -thread->GetDebugSuspendCount(), true);
    }
  }

  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << "UndoDebuggerSuspensions(" << *self << ") complete";
}

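// Blocks until the calling thread is the only remaining non-daemon thread and no thread is
// part-way through unregistering. Called during runtime shutdown, before the daemon threads are
// suspended and the thread list is destroyed.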
void ThreadList::WaitForOtherNonDaemonThreadsToExit() {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  while (true) {
    {
      // No more threads can be born after we start to shut down.
      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
      CHECK(Runtime::Current()->IsShuttingDownLocked());
      CHECK_EQ(Runtime::Current()->NumberOfThreadsBeingBorn(), 0U);
    }
    MutexLock mu(self, *Locks::thread_list_lock_);
    // Also wait for any threads that are unregistering to finish. This is required so that no
    // threads access the thread list after it is deleted. TODO: This may not work for user daemon
    // threads since they could unregister at the wrong time.
    bool done = unregistering_count_ == 0;
    if (done) {
      for (const auto& thread : list_) {
        if (thread != self && !thread->IsDaemon()) {
          done = false;
          break;
        }
      }
    }
    if (done) {
      break;
    }
    // Wait for another thread to exit before re-checking.
    Locks::thread_exit_cond_->Wait(self);
  }
}

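// Called during shutdown after all non-daemon threads have exited, to park the remaining daemon
// threads. Raises every daemon's suspend count and then polls for up to about two seconds for
// them to leave the runnable state, logging an error if they do not.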
void ThreadList::SuspendAllDaemonThreads() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::thread_list_lock_);
  {  // Tell all the daemons it's time to suspend.
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : list_) {
      // This is only run after all non-daemon threads have exited, so the remainder should all be
      // daemons.
      CHECK(thread->IsDaemon()) << *thread;
      if (thread != self) {
        thread->ModifySuspendCount(self, +1, false);
      }
    }
  }
  // Give the threads a chance to suspend, complaining if they're slow.
  bool have_complained = false;
  for (int i = 0; i < 10; ++i) {
    usleep(200 * 1000);
    bool all_suspended = true;
    for (const auto& thread : list_) {
      if (thread != self && thread->GetState() == kRunnable) {
        if (!have_complained) {
          LOG(WARNING) << "daemon thread not yet suspended: " << *thread;
          have_complained = true;
        }
        all_suspended = false;
      }
    }
    if (all_suspended) {
      return;
    }
  }
  LOG(ERROR) << "suspend all daemons failed";
}

void ThreadList::Register(Thread* self) {
  DCHECK_EQ(self, Thread::Current());

  if (VLOG_IS_ON(threads)) {
    std::ostringstream oss;
    self->ShortDump(oss);  // We don't hold the mutator_lock_ yet and so cannot call Dump.
    LOG(INFO) << "ThreadList::Register() " << *self  << "\n" << oss.str();
  }

  // Atomically add self to the thread list and make its thread_suspend_count_ reflect ongoing
  // SuspendAll requests.
  MutexLock mu(self, *Locks::thread_list_lock_);
  MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
  CHECK_GE(suspend_all_count_, debug_suspend_all_count_);
  // Modify suspend count in increments of 1 to maintain invariants in ModifySuspendCount. While
  // this isn't particularly efficient the suspend counts are most commonly 0 or 1.
  for (int delta = debug_suspend_all_count_; delta > 0; delta--) {
    self->ModifySuspendCount(self, +1, true);
  }
  for (int delta = suspend_all_count_ - debug_suspend_all_count_; delta > 0; delta--) {
    self->ModifySuspendCount(self, +1, false);
  }
  CHECK(!Contains(self));
  list_.push_back(self);
}

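// Removes |self| from the thread list and deletes it. self->Destroy() runs the expensive parts of
// thread teardown first; the removal then retries until the thread is not held suspended by a
// pending suspension request.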
void ThreadList::Unregister(Thread* self) {
  DCHECK_EQ(self, Thread::Current());
  CHECK_NE(self->GetState(), kRunnable);
  Locks::mutator_lock_->AssertNotHeld(self);

  VLOG(threads) << "ThreadList::Unregister() " << *self;

  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    ++unregistering_count_;
  }

  // Any time-consuming destruction, plus anything that can call back into managed code or
  // suspend and so on, must happen at this point, and not in ~Thread. The self->Destroy is what
  // causes the threads to join. It is important to do this after incrementing unregistering_count_
  // since we want the runtime to wait for the daemon threads to exit before deleting the thread
  // list.
  self->Destroy();

  // If tracing, remember thread id and name before thread exits.
  Trace::StoreExitingThreadInfo(self);

  uint32_t thin_lock_id = self->GetThreadId();
  while (true) {
    // Remove and delete the Thread* while holding the thread_list_lock_ and
    // thread_suspend_count_lock_ so that the unregistering thread cannot be suspended.
    // Note: deliberately not using MutexLock that could hold a stale self pointer.
    MutexLock mu(self, *Locks::thread_list_lock_);
    if (!Contains(self)) {
      std::string thread_name;
      self->GetThreadName(thread_name);
      std::ostringstream os;
      DumpNativeStack(os, GetTid(), "  native: ", nullptr);
      LOG(ERROR) << "Request to unregister unattached thread " << thread_name << "\n" << os.str();
      break;
    } else {
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
      if (!self->IsSuspended()) {
        list_.remove(self);
        break;
      }
    }
    // We failed to remove the thread due to a suspend request; loop and try again.
  }
  delete self;

  // Release the thread ID after the thread is finished and deleted to avoid cases where we can
  // temporarily have multiple threads with the same thread id. When this occurs, it causes
  // problems in FindThreadByThreadId / SuspendThreadByThreadId.
  ReleaseThreadId(nullptr, thin_lock_id);

  // Clear the TLS data, so that the underlying native thread is recognizably detached.
  // (It may wish to reattach later.)
  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, nullptr), "detach self");

  // Signal that a thread just detached.
  MutexLock mu(nullptr, *Locks::thread_list_lock_);
  --unregistering_count_;
  Locks::thread_exit_cond_->Broadcast(nullptr);
}

void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) {
  for (const auto& thread : list_) {
    callback(thread, context);
  }
}

void ThreadList::VisitRoots(RootVisitor* visitor) const {
  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
  for (const auto& thread : list_) {
    thread->VisitRoots(visitor);
  }
}

uint32_t ThreadList::AllocThreadId(Thread* self) {
  MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
  for (size_t i = 0; i < allocated_ids_.size(); ++i) {
    if (!allocated_ids_[i]) {
      allocated_ids_.set(i);
      return i + 1;  // Zero is reserved to mean "invalid".
    }
  }
  LOG(FATAL) << "Out of internal thread ids";
  return 0;
}

void ThreadList::ReleaseThreadId(Thread* self, uint32_t id) {
  MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
  --id;  // Zero is reserved to mean "invalid".
  DCHECK(allocated_ids_[id]) << id;
  allocated_ids_.reset(id);
}

}  // namespace art