/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mutex.h"

#include <errno.h>
#include <sys/time.h>

#include <sstream>

#include "android-base/stringprintf.h"

#include "base/atomic.h"
#include "base/logging.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/value_object.h"
#include "monitor.h"
#include "mutex-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
#include "thread.h"
#include "thread_list.h"

namespace art HIDDEN {

using android::base::StringPrintf;

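// Throttling parameters for the contention stack dumps in Mutex::DumpStack() and
// Mutex::IsDumpFrequent() below: a thread's stack is dumped at most once per
// kIntervalMillis * try_times milliseconds, and at most kMonitorTimeoutTryMax
// timeouts trigger a dump for a single wait.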
static constexpr uint64_t kIntervalMillis = 50;
static constexpr int kMonitorTimeoutTryMax = 5;

static const char* kLastDumpStackTime = "LastDumpStackTime";

struct AllMutexData {
  // A guard for all_mutexes that's not a mutex (Mutexes must CAS to acquire and busy wait).
  Atomic<const BaseMutex*> all_mutexes_guard;
  // All created mutexes guarded by all_mutexes_guard.
  std::set<BaseMutex*>* all_mutexes;
  AllMutexData() : all_mutexes(nullptr) {}
};
static struct AllMutexData gAllMutexData[kAllMutexDataSize];

struct DumpStackLastTimeTLSData : public art::TLSData {
  explicit DumpStackLastTimeTLSData(uint64_t last_dump_time_ms)
      : last_dump_time_ms_(last_dump_time_ms) {}
  std::atomic<uint64_t> last_dump_time_ms_;
};

#if ART_USE_FUTEXES
// Compute a relative timespec as *result_ts = lhs - rhs.
// Return false (and produce an invalid *result_ts) if lhs < rhs.
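// For example (illustrative values): lhs = {2s, 100ns} and rhs = {0s, 500000000ns}
// yield *result_ts = {1s, 500000100ns} and a return value of true.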
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
  const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
  static_assert(std::is_signed<decltype(result_ts->tv_sec)>::value);  // Signed on Linux.
  result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
  result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
  if (result_ts->tv_nsec < 0) {
    result_ts->tv_sec--;
    result_ts->tv_nsec += one_sec;
  }
  DCHECK(result_ts->tv_nsec >= 0 && result_ts->tv_nsec < one_sec);
  return result_ts->tv_sec >= 0;
}
#endif

#if ART_USE_FUTEXES
// If we wake up from a futex wake, and the runtime disappeared while we were asleep,
// it's important to stop in our tracks before we touch deallocated memory.
static inline void SleepIfRuntimeDeleted(Thread* self) {
  if (self != nullptr) {
    JNIEnvExt* const env = self->GetJniEnv();
    if (UNLIKELY(env != nullptr && env->IsRuntimeDeleted())) {
      DCHECK(self->IsDaemon());
      // If the runtime has been deleted, then we cannot proceed. Just sleep forever. This may
      // occur for user daemon threads that get a spurious wakeup. This occurs for test 132 with
      // --host and --gdb.
      // After we wake up, the runtime may have been shut down, which means that this condition
      // may have been deleted. It is not safe to retry the wait.
      SleepForever();
    }
  }
}
#else
// We should be doing this for pthreads too, but it seems to be impossible for something
// like a condition variable wait. Thus we don't bother trying.
#endif

// Wait for an amount of time that roughly increases in the argument i.
// Spin for small arguments and yield/sleep for longer ones.
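// Callers typically invoke BackOff(i) in a retry loop with increasing i, so that early
// attempts spin and later ones yield or sleep (see ScopedAllMutexesLock below).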
static void BackOff(uint32_t i) {
  static constexpr uint32_t kSpinMax = 10;
  static constexpr uint32_t kYieldMax = 20;
  if (i <= kSpinMax) {
    // TODO: Esp. in very latency-sensitive cases, consider replacing this with an explicit
    // test-and-test-and-set loop in the caller.  Possibly skip entirely on a uniprocessor.
    volatile uint32_t x = 0;
    const uint32_t spin_count = 10 * i;
    for (uint32_t spin = 0; spin < spin_count; ++spin) {
      ++x;  // Volatile; hence should not be optimized away.
    }
    // TODO: Consider adding x86 PAUSE and/or ARM YIELD here.
  } else if (i <= kYieldMax) {
    sched_yield();
  } else {
    NanoSleep(1000ull * (i - kYieldMax));
  }
}

// Wait until pred(testLoc->load(std::memory_order_relaxed)) holds, or until a
// short time interval, on the order of kernel context-switch time, passes.
// Return true if the predicate test succeeded, false if we timed out.
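// Typical use, as in Mutex::ExclusiveLock() below:
//   WaitBrieflyFor(&state_and_contenders_, self,
//                  [](int32_t v) { return (v & kHeldMask) == 0; })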
template<typename Pred>
static inline bool WaitBrieflyFor(AtomicInteger* testLoc, Thread* self, Pred pred) {
  // TODO: Tune these parameters correctly. BackOff(3) should take on the order of 100 cycles. So
  // this should result in retrying <= 10 times, usually waiting around 100 cycles each. The
  // maximum delay should be significantly less than the expected futex() context switch time, so
  // there should be little danger of this worsening things appreciably. If the lock was only
  // held briefly by a running thread, this should help immensely.
  static constexpr uint32_t kMaxBackOff = 3;  // Should probably be <= kSpinMax above.
  static constexpr uint32_t kMaxIters = 50;
  JNIEnvExt* const env = self == nullptr ? nullptr : self->GetJniEnv();
  for (uint32_t i = 1; i <= kMaxIters; ++i) {
    BackOff(std::min(i, kMaxBackOff));
    if (pred(testLoc->load(std::memory_order_relaxed))) {
      return true;
    }
    if (UNLIKELY(env != nullptr && env->IsRuntimeDeleted())) {
      // This returns true once we've started shutting down. We then try to reach a quiescent
      // state as soon as possible to avoid touching data that may be deallocated by the shutdown
      // process. It currently relies on a timeout.
      return false;
    }
  }
  return false;
}

class ScopedAllMutexesLock final {
 public:
  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
    for (uint32_t i = 0;
         !gAllMutexData->all_mutexes_guard.CompareAndSetWeakAcquire(nullptr, mutex);
         ++i) {
      BackOff(i);
    }
  }

  ~ScopedAllMutexesLock() {
    DCHECK_EQ(gAllMutexData->all_mutexes_guard.load(std::memory_order_relaxed), mutex_);
    gAllMutexData->all_mutexes_guard.store(nullptr, std::memory_order_release);
  }

 private:
  const BaseMutex* const mutex_;
};

// Scoped class that generates events at the beginning and end of lock contention.
class ScopedContentionRecorder final : public ValueObject {
 public:
  ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
      : mutex_(kLogLockContentions ? mutex : nullptr),
        blocked_tid_(kLogLockContentions ? blocked_tid : 0),
        owner_tid_(kLogLockContentions ? owner_tid : 0),
        start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
    if (ATraceEnabled()) {
      std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
                                     mutex->GetName(), owner_tid);
      ATraceBegin(msg.c_str());
    }
  }

  ~ScopedContentionRecorder() {
    ATraceEnd();
    if (kLogLockContentions) {
      uint64_t end_nano_time = NanoTime();
      mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_);
    }
  }

 private:
  BaseMutex* const mutex_;
  const uint64_t blocked_tid_;
  const uint64_t owner_tid_;
  const uint64_t start_nano_time_;
};

BaseMutex::BaseMutex(const char* name, LockLevel level)
    : name_(name),
      level_(level),
      should_respond_to_empty_checkpoint_request_(false) {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
    if (*all_mutexes_ptr == nullptr) {
      // We leak the global set of all mutexes to avoid ordering issues in global variable
      // construction/destruction.
      *all_mutexes_ptr = new std::set<BaseMutex*>();
    }
    (*all_mutexes_ptr)->insert(this);
  }
}

BaseMutex::~BaseMutex() {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    gAllMutexData->all_mutexes->erase(this);
  }
}

void BaseMutex::DumpAll(std::ostream& os) {
  if (kLogLockContentions) {
    os << "Mutex logging:\n";
    ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
    std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
    if (all_mutexes == nullptr) {
      // No mutexes have been created yet at startup.
      return;
    }
    os << "(Contended)\n";
    for (const BaseMutex* mutex : *all_mutexes) {
      if (mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
    os << "(Never contended)\n";
    for (const BaseMutex* mutex : *all_mutexes) {
      if (!mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
  }
}

void BaseMutex::CheckSafeToWait(Thread* self) {
  if (!kDebugLocking) {
    return;
  }
  // Avoid repeated reporting of the same violation in the common case.
  // We somewhat ignore races in the duplicate elision code. The first kMaxReports and the first
  // report for a given level_ should always appear.
  static std::atomic<uint> last_level_reported(kLockLevelCount);
  static constexpr int kMaxReports = 5;
  static std::atomic<uint> num_reports(0);  // For the current level, more or less.

  if (self == nullptr) {
    CheckUnattachedThread(level_);
  } else if (num_reports.load(std::memory_order_relaxed) > kMaxReports &&
             last_level_reported.load(std::memory_order_relaxed) == level_) {
    LOG(ERROR) << "Eliding probably redundant CheckSafeToWait() complaints";
    return;
  } else {
    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
        << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    std::string error_msg;
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      if (i != level_) {
        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
        // We allow the thread to wait even if the user_code_suspension_lock_ is held. That just
        // means that the GC or some other internal process is suspending the thread while it is
        // trying to suspend some other thread. So long as the current thread is not being
        // suspended by a SuspendReason::kForUserCode (which needs the user_code_suspension_lock_
        // to clear) this is fine. This is needed due to user_code_suspension_lock_ being the way
        // untrusted code interacts with suspension. One holds the lock to prevent
        // user-code-suspension from occurring. Since this is only initiated from user-supplied
        // native-code this is safe.
        if (held_mutex == Locks::user_code_suspension_lock_) {
          // No thread safety analysis is fine since we have both the user_code_suspension_lock_
          // from the line above and the ThreadSuspendCountLock since it is our level_. We use this
          // lambda to avoid having to annotate the whole function as NO_THREAD_SAFETY_ANALYSIS.
          auto is_suspending_for_user_code = [self]() NO_THREAD_SAFETY_ANALYSIS {
            return self->GetUserCodeSuspendCount() != 0;
          };
          if (is_suspending_for_user_code()) {
            std::ostringstream oss;
            oss << "Holding \"" << held_mutex->name_ << "\" "
                << "(level " << LockLevel(i) << ") while performing wait on "
                << "\"" << name_ << "\" (level " << level_ << ") "
                << "with SuspendReason::kForUserCode pending suspensions";
            error_msg = oss.str();
            LOG(ERROR) << error_msg;
            bad_mutexes_held = true;
          }
        } else if (held_mutex != nullptr) {
          if (last_level_reported.load(std::memory_order_relaxed) == level_) {
            num_reports.fetch_add(1, std::memory_order_relaxed);
          } else {
            last_level_reported.store(level_, std::memory_order_relaxed);
            num_reports.store(0, std::memory_order_relaxed);
          }
          std::ostringstream oss;
          oss << "Holding \"" << held_mutex->name_ << "\" "
              << "(level " << LockLevel(i) << ") while performing wait on "
              << "\"" << name_ << "\" (level " << level_ << ")";
          error_msg = oss.str();
          LOG(ERROR) << error_msg;
          bad_mutexes_held = true;
        }
      }
    }
    if (gAborting == 0) {  // Avoid recursive aborts.
      CHECK(!bad_mutexes_held) << error_msg;
    }
  }
}

void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
  if (kLogLockContentions) {
    // Atomically add value to wait_time.
    wait_time.fetch_add(value, std::memory_order_seq_cst);
  }
}

void BaseMutex::RecordContention(uint64_t blocked_tid,
                                 uint64_t owner_tid,
                                 uint64_t nano_time_blocked) {
  if (kLogLockContentions) {
    ContentionLogData* data = contention_log_data_;
    ++(data->contention_count);
    data->AddToWaitTime(nano_time_blocked);
    ContentionLogEntry* log = data->contention_log;
    // This code is intentionally racy as it is only used for diagnostics.
    int32_t slot = data->cur_content_log_entry.load(std::memory_order_relaxed);
    if (log[slot].blocked_tid == blocked_tid &&
        log[slot].owner_tid == owner_tid) {
      ++log[slot].count;
    } else {
      uint32_t new_slot;
      do {
        slot = data->cur_content_log_entry.load(std::memory_order_relaxed);
        new_slot = (slot + 1) % kContentionLogSize;
      } while (!data->cur_content_log_entry.CompareAndSetWeakRelaxed(slot, new_slot));
      log[new_slot].blocked_tid = blocked_tid;
      log[new_slot].owner_tid = owner_tid;
      log[new_slot].count.store(1, std::memory_order_relaxed);
    }
  }
}

void BaseMutex::DumpContention(std::ostream& os) const {
  if (kLogLockContentions) {
    const ContentionLogData* data = contention_log_data_;
    const ContentionLogEntry* log = data->contention_log;
    uint64_t wait_time = data->wait_time.load(std::memory_order_relaxed);
    uint32_t contention_count = data->contention_count.load(std::memory_order_relaxed);
    if (contention_count == 0) {
      os << "never contended";
    } else {
      os << "contended " << contention_count
         << " total wait of contender " << PrettyDuration(wait_time)
         << " average " << PrettyDuration(wait_time / contention_count);
      SafeMap<uint64_t, size_t> most_common_blocker;
      SafeMap<uint64_t, size_t> most_common_blocked;
      for (size_t i = 0; i < kContentionLogSize; ++i) {
        uint64_t blocked_tid = log[i].blocked_tid;
        uint64_t owner_tid = log[i].owner_tid;
        uint32_t count = log[i].count.load(std::memory_order_relaxed);
        if (count > 0) {
          auto it = most_common_blocked.find(blocked_tid);
          if (it != most_common_blocked.end()) {
            most_common_blocked.Overwrite(blocked_tid, it->second + count);
          } else {
            most_common_blocked.Put(blocked_tid, count);
          }
          it = most_common_blocker.find(owner_tid);
          if (it != most_common_blocker.end()) {
            most_common_blocker.Overwrite(owner_tid, it->second + count);
          } else {
            most_common_blocker.Put(owner_tid, count);
          }
        }
      }
      uint64_t max_tid = 0;
      size_t max_tid_count = 0;
      for (const auto& pair : most_common_blocked) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows most blocked tid=" << max_tid;
      }
      max_tid = 0;
      max_tid_count = 0;
      for (const auto& pair : most_common_blocker) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows tid=" << max_tid << " owning during this time";
      }
    }
  }
}

Mutex::Mutex(const char* name, LockLevel level, bool recursive)
    : BaseMutex(name, level), exclusive_owner_(0), recursion_count_(0), recursive_(recursive) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, state_and_contenders_.load(std::memory_order_relaxed));
#else
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
#endif
}

// Helper to allow checking shutdown while locking for thread safety.
static bool IsSafeToCallAbortSafe() {
  MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
  return Locks::IsSafeToCallAbortRacy();
}

Mutex::~Mutex() {
  bool safe_to_call_abort = Locks::IsSafeToCallAbortRacy();
#if ART_USE_FUTEXES
  if (state_and_contenders_.load(std::memory_order_relaxed) != 0) {
    LOG(safe_to_call_abort ? FATAL : WARNING)
        << "destroying mutex with owner or contenders. Owner: " << GetExclusiveOwnerTid();
  } else {
    if (GetExclusiveOwnerTid() != 0) {
      LOG(safe_to_call_abort ? FATAL : WARNING)
          << "unexpectedly found an owner on unlocked mutex " << name_;
    }
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_mutex_destroy(&mutex_);
  if (rc != 0) {
    errno = rc;
    PLOG(safe_to_call_abort ? FATAL : WARNING)
        << "pthread_mutex_destroy failed for " << name_;
  }
#endif
}

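// Lock state encoding (see kHeldMask / kContenderIncrement in mutex.h): the low bit of
// state_and_contenders_ indicates that the mutex is held, and the remaining bits count
// the threads currently blocked in a futex wait on it.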
void Mutex::ExclusiveLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_and_contenders_.load(std::memory_order_relaxed);
      if (LIKELY((cur_state & kHeldMask) == 0) /* lock not held */) {
        done = state_and_contenders_.CompareAndSetWeakAcquire(cur_state, cur_state | kHeldMask);
      } else {
        // Failed to acquire, hang up.
        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
        // Empirically, it appears important to spin again each time through the loop; if we
        // bother to go to sleep and wake up, we should be fairly persistent in trying for the
        // lock.
        if (!WaitBrieflyFor(&state_and_contenders_, self,
                            [](int32_t v) { return (v & kHeldMask) == 0; })) {
          // Increment contender count. We can't create enough threads for this to overflow.
          increment_contenders();
          // Make cur_state again reflect the expected value of state_and_contenders.
          cur_state += kContenderIncrement;
          if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
            self->CheckEmptyCheckpointFromMutex();
          }

          uint64_t wait_start_ms = enable_monitor_timeout_ ? MilliTime() : 0;
          uint64_t try_times = 0;
          do {
            timespec timeout_ts;
            timeout_ts.tv_sec = 0;
            // NB: Some tests use the mutex without the runtime, so fall back to the default
            // monitor timeout, converted from milliseconds to the nanoseconds tv_nsec expects.
            timeout_ts.tv_nsec = Runtime::Current() != nullptr
                ? Runtime::Current()->GetMonitorTimeoutNs()
                : MsToNs(Monitor::kDefaultMonitorTimeoutMs);
            if (futex(state_and_contenders_.Address(), FUTEX_WAIT_PRIVATE, cur_state,
                      enable_monitor_timeout_ ? &timeout_ts : nullptr, nullptr, 0) != 0) {
              // We only went to sleep after incrementing contenders and checking that the
              // lock is still held by someone else.  EAGAIN and EINTR both indicate a spurious
              // failure, try again from the beginning.  We don't use TEMP_FAILURE_RETRY so we can
              // intentionally retry to acquire the lock.
              if ((errno != EAGAIN) && (errno != EINTR)) {
                if (errno == ETIMEDOUT) {
                  try_times++;
                  if (try_times <= kMonitorTimeoutTryMax) {
                    DumpStack(self, wait_start_ms, try_times);
                  }
                } else {
                  PLOG(FATAL) << "futex wait failed for " << name_;
                }
              }
            }
            SleepIfRuntimeDeleted(self);
            // Retry until not held. In heavy contention situations we otherwise get redundant
            // futex wakeups as a result of repeatedly decrementing and incrementing contenders.
            cur_state = state_and_contenders_.load(std::memory_order_relaxed);
          } while ((cur_state & kHeldMask) != 0);
          decrement_contenders();
        }
      }
    } while (!done);
    // Confirm that lock is now held.
    DCHECK_NE(state_and_contenders_.load(std::memory_order_relaxed) & kHeldMask, 0);
#else
    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
    DCHECK_EQ(GetExclusiveOwnerTid(), 0) << " my tid = " << SafeGetTid(self)
                                         << " recursive_ = " << recursive_;
    exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
}

void Mutex::DumpStack(Thread* self, uint64_t wait_start_ms, uint64_t try_times) {
  ScopedObjectAccess soa(self);
  Locks::thread_list_lock_->ExclusiveLock(self);
  std::string owner_stack_dump;
  pid_t owner_tid = GetExclusiveOwnerTid();
  CHECK(Runtime::Current() != nullptr);
  Thread* owner = Runtime::Current()->GetThreadList()->FindThreadByTid(owner_tid);
  if (owner != nullptr) {
    if (IsDumpFrequent(owner, try_times)) {
      Locks::thread_list_lock_->ExclusiveUnlock(self);
      LOG(WARNING) << "Contention with tid " << owner_tid << ", monitor id " << monitor_id_;
      return;
    }
    struct CollectStackTrace : public Closure {
      void Run(art::Thread* thread) override
        REQUIRES_SHARED(art::Locks::mutator_lock_) {
        if (IsDumpFrequent(thread)) {
          return;
        }
        DumpStackLastTimeTLSData* tls_data =
            reinterpret_cast<DumpStackLastTimeTLSData*>(thread->GetCustomTLS(kLastDumpStackTime));
        if (tls_data == nullptr) {
          thread->SetCustomTLS(kLastDumpStackTime, new DumpStackLastTimeTLSData(MilliTime()));
        } else {
          tls_data->last_dump_time_ms_.store(MilliTime());
        }
        thread->DumpJavaStack(oss);
      }
      std::ostringstream oss;
    };
    CollectStackTrace owner_trace;
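    // Note: RequestSynchronousCheckpoint releases thread_list_lock_ as part of its
    // execution, which is why this path, unlike the ones above and below, does not
    // unlock it explicitly.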
    owner->RequestSynchronousCheckpoint(&owner_trace);
    owner_stack_dump = owner_trace.oss.str();
    uint64_t wait_ms = MilliTime() - wait_start_ms;
    LOG(WARNING) << "Monitor contention with tid " << owner_tid << ", wait time: " << wait_ms
                 << "ms, monitor id: " << monitor_id_
                 << "\nPerfMonitor owner thread(" << owner_tid << ") stack is:\n"
                 << owner_stack_dump;
  } else {
    Locks::thread_list_lock_->ExclusiveUnlock(self);
  }
}

bool Mutex::IsDumpFrequent(Thread* thread, uint64_t try_times) {
  uint64_t last_dump_time_ms = 0;
  DumpStackLastTimeTLSData* tls_data =
      reinterpret_cast<DumpStackLastTimeTLSData*>(thread->GetCustomTLS(kLastDumpStackTime));
  if (tls_data != nullptr) {
    last_dump_time_ms = tls_data->last_dump_time_ms_.load();
  }
  uint64_t interval = MilliTime() - last_dump_time_ms;
  return interval < kIntervalMillis * try_times;
}

template <bool kCheck>
bool Mutex::ExclusiveTryLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_and_contenders_.load(std::memory_order_relaxed);
      if ((cur_state & kHeldMask) == 0) {
        // Change state to held and impose load/store ordering appropriate for lock acquisition.
        done = state_and_contenders_.CompareAndSetWeakAcquire(cur_state, cur_state | kHeldMask);
      } else {
        return false;
      }
    } while (!done);
    DCHECK_NE(state_and_contenders_.load(std::memory_order_relaxed) & kHeldMask, 0);
#else
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) {
      return false;
    }
    if (result != 0) {
      errno = result;
      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
    }
#endif
    DCHECK_EQ(GetExclusiveOwnerTid(), 0);
    exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
    RegisterAsLocked(self, kCheck);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
  return true;
}

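// Explicit instantiations for both values of kCheck, which is forwarded to
// RegisterAsLocked() above.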
template bool Mutex::ExclusiveTryLock<false>(Thread* self);
template bool Mutex::ExclusiveTryLock<true>(Thread* self);

bool Mutex::ExclusiveTryLockWithSpinning(Thread* self) {
  // Spin a small number of times, since this affects our ability to respond to suspension
  // requests. We spin repeatedly only if the mutex repeatedly becomes available and unavailable
  // in rapid succession, and then we will typically not spin for the maximal period.
  const int kMaxSpins = 5;
  for (int i = 0; i < kMaxSpins; ++i) {
    if (ExclusiveTryLock(self)) {
      return true;
    }
#if ART_USE_FUTEXES
    if (!WaitBrieflyFor(&state_and_contenders_, self,
            [](int32_t v) { return (v & kHeldMask) == 0; })) {
      return false;
    }
#endif
  }
  return ExclusiveTryLock(self);
}

#if ART_USE_FUTEXES
void Mutex::ExclusiveLockUncontendedFor(Thread* new_owner) {
  DCHECK_EQ(level_, kMonitorLock);
  DCHECK(!recursive_);
  state_and_contenders_.store(kHeldMask, std::memory_order_relaxed);
  recursion_count_ = 1;
  exclusive_owner_.store(SafeGetTid(new_owner), std::memory_order_relaxed);
  // Don't call RegisterAsLocked(). It wouldn't register anything anyway.  And
  // this happens as we're inflating a monitor, which doesn't logically affect
  // held "locks"; it effectively just converts a thin lock to a mutex.  By doing
  // this while the lock is already held, we're delaying the acquisition of a
  // logically held mutex, which can introduce bogus lock order violations.
}

void Mutex::ExclusiveUnlockUncontended() {
  DCHECK_EQ(level_, kMonitorLock);
  state_and_contenders_.store(0, std::memory_order_relaxed);
  recursion_count_ = 0;
  exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
  // Skip RegisterAsUnlocked(), which wouldn't do anything anyway.
}
#endif  // ART_USE_FUTEXES

void Mutex::ExclusiveUnlock(Thread* self) {
  if (kIsDebugBuild && self != nullptr && self != Thread::Current()) {
    std::string name1 = "<null>";
    std::string name2 = "<null>";
    if (self != nullptr) {
      self->GetThreadName(name1);
    }
    if (Thread::Current() != nullptr) {
      Thread::Current()->GetThreadName(name2);
    }
    LOG(FATAL) << GetName() << " level=" << level_ << " self=" << name1
               << " Thread::Current()=" << name2;
  }
  AssertHeld(self);
  DCHECK_NE(GetExclusiveOwnerTid(), 0);
  recursion_count_--;
  if (!recursive_ || recursion_count_ == 0) {
    if (kDebugLocking) {
      CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
          << name_ << " " << recursion_count_;
    }
    RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_and_contenders_.load(std::memory_order_relaxed);
      if (LIKELY((cur_state & kHeldMask) != 0)) {
        // We're no longer the owner.
        exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
        // Change state to not held and impose load/store ordering appropriate for lock release.
        uint32_t new_state = cur_state & ~kHeldMask;  // Same number of contenders.
        done = state_and_contenders_.CompareAndSetWeakRelease(cur_state, new_state);
        if (LIKELY(done)) {  // Weak CAS may fail spuriously; retry if it did.
          if (UNLIKELY(new_state != 0) /* have contenders */) {
            futex(state_and_contenders_.Address(), FUTEX_WAKE_PRIVATE, kWakeOne,
                  nullptr, nullptr, 0);
          }
          // Threads only go to sleep after incrementing contenders and verifying the lock is
          // still held. If we didn't see waiters, then there couldn't have been any futexes
          // waiting on this lock when we did the CAS. New arrivals after that cannot wait for us,
          // since the futex wait call would see the lock available and immediately return.
        }
      } else {
        // Logging acquires the logging lock, avoid infinite recursion in that case.
        if (this != Locks::logging_lock_) {
          LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
        } else {
          LogHelper::LogLineLowStack(__FILE__,
                                     __LINE__,
                                     ::android::base::FATAL_WITHOUT_ABORT,
                                     StringPrintf("Unexpected state_ %d in unlock for %s",
                                                  cur_state, name_).c_str());
          _exit(1);
        }
      }
    } while (!done);
#else
    exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
    CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
#endif
  }
}

void Mutex::Dump(std::ostream& os) const {
  os << (recursive_ ? "recursive " : "non-recursive ") << name_
     << " level=" << static_cast<int>(level_) << " rec=" << recursion_count_
#if ART_USE_FUTEXES
     << " state_and_contenders = " << std::hex << state_and_contenders_ << std::dec
#endif
     << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
  mu.Dump(os);
  return os;
}

void Mutex::WakeupToRespondToEmptyCheckpoint() {
#if ART_USE_FUTEXES
  // Wake up all the waiters so they will respond to the empty checkpoint.
  DCHECK(should_respond_to_empty_checkpoint_request_);
  if (UNLIKELY(get_contenders() != 0)) {
    futex(state_and_contenders_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);
  }
#else
  LOG(FATAL) << "Non futex case isn't supported.";
#endif
}

ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
    : BaseMutex(name, level)
#if ART_USE_FUTEXES
    , state_(0), exclusive_owner_(0), num_contenders_(0)
#endif
{
#if !ART_USE_FUTEXES
  CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr));
#endif
}

ReaderWriterMutex::~ReaderWriterMutex() {
#if ART_USE_FUTEXES
  CHECK_EQ(state_.load(std::memory_order_relaxed), 0);
  CHECK_EQ(GetExclusiveOwnerTid(), 0);
  CHECK_EQ(num_contenders_.load(std::memory_order_relaxed), 0);
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_rwlock_destroy(&rwlock_);
  if (rc != 0) {
    errno = rc;
    bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
    PLOG(is_safe_to_call_abort ? FATAL : WARNING) << "pthread_rwlock_destroy failed for " << name_;
  }
#endif
}

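// In the futex implementation, state_ encodes the rwlock state: 0 means unheld, -1 means
// held exclusively by a single writer, and n > 0 means held by n readers (see the CAS
// transitions below and in SharedTryLock()).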
void ReaderWriterMutex::ExclusiveLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (LIKELY(cur_state == 0)) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      if (!WaitBrieflyFor(&state_, self, [](int32_t v) { return v == 0; })) {
        num_contenders_.fetch_add(1);
        if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
          self->CheckEmptyCheckpointFromMutex();
        }
        if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
          if ((errno != EAGAIN) && (errno != EINTR)) {
            PLOG(FATAL) << "futex wait failed for " << name_;
          }
        }
        SleepIfRuntimeDeleted(self);
        num_contenders_.fetch_sub(1);
      }
    }
  } while (!done);
  DCHECK_EQ(state_.load(std::memory_order_relaxed), -1);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
#endif
  DCHECK_EQ(GetExclusiveOwnerTid(), 0);
  exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
}

void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  AssertExclusiveHeld(self);
  RegisterAsUnlocked(self);
  DCHECK_NE(GetExclusiveOwnerTid(), 0);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (LIKELY(cur_state == -1)) {
      // We're no longer the owner.
      exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
      // Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
      // Note, the num_contenders_ load below mustn't reorder before the CompareAndSet.
      done = state_.CompareAndSetWeakSequentiallyConsistent(-1 /* cur_state */, 0 /* new state */);
      if (LIKELY(done)) {  // Weak CAS may fail spuriously.
        // Wake any waiters.
        if (UNLIKELY(num_contenders_.load(std::memory_order_seq_cst) > 0)) {
          futex(state_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
    }
  } while (!done);
#else
  exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}

#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  timespec end_abs_ts;
  InitTimeSpec(true, CLOCK_MONOTONIC, ms, ns, &end_abs_ts);
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (cur_state == 0) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      timespec now_abs_ts;
      InitTimeSpec(true, CLOCK_MONOTONIC, 0, 0, &now_abs_ts);
      timespec rel_ts;
      if (!ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
        return false;  // Timed out.
      }
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      if (!WaitBrieflyFor(&state_, self, [](int32_t v) { return v == 0; })) {
        num_contenders_.fetch_add(1);
        if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
          self->CheckEmptyCheckpointFromMutex();
        }
        if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, &rel_ts, nullptr, 0) != 0) {
          if (errno == ETIMEDOUT) {
            num_contenders_.fetch_sub(1);
            return false;  // Timed out.
          } else if ((errno != EAGAIN) && (errno != EINTR)) {
            // EAGAIN and EINTR both indicate a spurious failure; we loop and recompute the
            // relative timeout from now. We don't use TEMP_FAILURE_RETRY so we can recompute
            // rel_ts. Any other errno is fatal.
            num_contenders_.fetch_sub(1);  // Unlikely to matter.
            PLOG(FATAL) << "timed futex wait failed for " << name_;
          }
        }
        SleepIfRuntimeDeleted(self);
        num_contenders_.fetch_sub(1);
      }
    }
  } while (!done);
#else
  timespec ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
  int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
  if (result == ETIMEDOUT) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
  }
#endif
  exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}
#endif

#if ART_USE_FUTEXES
void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_state) {
  // Owner holds it exclusively, hang up.
  ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
  if (!WaitBrieflyFor(&state_, self, [](int32_t v) { return v >= 0; })) {
    num_contenders_.fetch_add(1);
    if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
      self->CheckEmptyCheckpointFromMutex();
    }
    if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
      if (errno != EAGAIN && errno != EINTR) {
        PLOG(FATAL) << "futex wait failed for " << name_;
      }
    }
    SleepIfRuntimeDeleted(self);
    num_contenders_.fetch_sub(1);
  }
}
#endif

bool ReaderWriterMutex::SharedTryLock(Thread* self, bool check) {
  DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (cur_state >= 0) {
      // Add as an extra reader and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareAndSetWeakAcquire(cur_state, cur_state + 1);
    } else {
      // Owner holds it exclusively.
      return false;
    }
  } while (!done);
#else
  int result = pthread_rwlock_tryrdlock(&rwlock_);
  if (result == EBUSY) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_tryrdlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self, check);
  AssertSharedHeld(self);
  return true;
}

bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
  DCHECK(self == nullptr || self == Thread::Current());
  bool result;
  if (UNLIKELY(self == nullptr)) {  // Handle unattached threads.
    result = IsExclusiveHeld(self);  // TODO: a better best effort here.
  } else {
    result = (self->GetHeldMutex(level_) == this);
  }
  return result;
}

void ReaderWriterMutex::Dump(std::ostream& os) const {
  os << name_
      << " level=" << static_cast<int>(level_)
      << " owner=" << GetExclusiveOwnerTid()
#if ART_USE_FUTEXES
      << " state=" << state_.load(std::memory_order_seq_cst)
      << " num_contenders=" << num_contenders_.load(std::memory_order_seq_cst)
#endif
      << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
  mu.Dump(os);
  return os;
}

std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu) {
  mu.Dump(os);
  return os;
}

void ReaderWriterMutex::WakeupToRespondToEmptyCheckpoint() {
#if ART_USE_FUTEXES
  // Wake up all the waiters so they will respond to the empty checkpoint.
  DCHECK(should_respond_to_empty_checkpoint_request_);
  if (UNLIKELY(num_contenders_.load(std::memory_order_relaxed) > 0)) {
    futex(state_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);
  }
#else
  LOG(FATAL) << "Non futex case isn't supported.";
#endif
}

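// The futex-based ConditionVariable is built on a sequence counter: Wait() sleeps on
// sequence_'s futex, while Signal()/Broadcast() bump sequence_ and requeue sleeping
// waiters onto the guard mutex's futex (see RequeueWaiters()), so they are woken as the
// guard is released instead of all stampeding for it at once.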
ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
    : name_(name), guard_(guard) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, sequence_.load(std::memory_order_relaxed));
  num_waiters_ = 0;
#else
  pthread_condattr_t cond_attrs;
  CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
#if !defined(__APPLE__)
  // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
  CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
#endif
  CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
#endif
}

ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
  if (num_waiters_ != 0) {
    bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
    LOG(is_safe_to_call_abort ? FATAL : WARNING)
        << "ConditionVariable::~ConditionVariable for " << name_
        << " called with " << num_waiters_ << " waiters.";
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using condition variables.
  int rc = pthread_cond_destroy(&cond_);
  if (rc != 0) {
    errno = rc;
    bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
    PLOG(is_safe_to_call_abort ? FATAL : WARNING) << "pthread_cond_destroy failed for " << name_;
  }
#endif
}

void ConditionVariable::Broadcast(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  // TODO: enable below, there's a race in thread creation that causes false failures currently.
  // guard_.AssertExclusiveHeld(self);
  DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
#if ART_USE_FUTEXES
  RequeueWaiters(std::numeric_limits<int32_t>::max());
#else
  CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
#endif
}

#if ART_USE_FUTEXES
void ConditionVariable::RequeueWaiters(int32_t count) {
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate a signal occurred.
    // Move waiters from the condition variable's futex to the guard's futex,
    // so that they will be woken up when the mutex is released.
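    // NB: For FUTEX_REQUEUE the kernel interprets the timeout argument as the integer
    // limit on the number of waiters to requeue, hence the timespec* cast below.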
    bool done = futex(sequence_.Address(),
                      FUTEX_REQUEUE_PRIVATE,
                      /* Threads to wake */ 0,
                      /* Threads to requeue */ reinterpret_cast<const timespec*>(count),
                      guard_.state_and_contenders_.Address(),
                      0) != -1;
    if (!done && errno != EAGAIN && errno != EINTR) {
      PLOG(FATAL) << "futex requeue failed for " << name_;
    }
  }
}
#endif

void ConditionVariable::Signal(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
  RequeueWaiters(1);
#else
  CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
#endif
}

void ConditionVariable::Wait(Thread* self) {
  guard_.CheckSafeToWait(self);
  WaitHoldingLocks(self);
}

void ConditionVariable::WaitHoldingLocks(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.increment_contenders();
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT_PRIVATE, cur_sequence, nullptr, nullptr, 0) != 0) {
    // Futex failed; check that it is an expected error.
    // EAGAIN == EWOULDBLOCK, so we let the caller try again.
    // EINTR implies a signal was sent to this thread.
    if ((errno != EINTR) && (errno != EAGAIN)) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  SleepIfRuntimeDeleted(self);
  guard_.ExclusiveLock(self);
  CHECK_GT(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GT(guard_.get_contenders(), 0);
  guard_.decrement_contenders();
#else
  pid_t old_owner = guard_.GetExclusiveOwnerTid();
  guard_.exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
  guard_.recursion_count_ = 0;
  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
  guard_.exclusive_owner_.store(old_owner, std::memory_order_relaxed);
#endif
  guard_.recursion_count_ = old_recursion_count;
}

bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == nullptr || self == Thread::Current());
  bool timed_out = false;
  guard_.AssertExclusiveHeld(self);
  guard_.CheckSafeToWait(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  timespec rel_ts;
  InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.increment_contenders();
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT_PRIVATE, cur_sequence, &rel_ts, nullptr, 0) != 0) {
    if (errno == ETIMEDOUT) {
      // Timed out, we're done.
      timed_out = true;
    } else if ((errno == EAGAIN) || (errno == EINTR)) {
      // A signal or ConditionVariable::Signal/Broadcast has come in.
    } else {
      PLOG(FATAL) << "timed futex wait failed for " << name_;
    }
  }
  SleepIfRuntimeDeleted(self);
  guard_.ExclusiveLock(self);
  CHECK_GT(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GT(guard_.get_contenders(), 0);
  guard_.decrement_contenders();
#else
#if !defined(__APPLE__)
  int clock = CLOCK_MONOTONIC;
#else
  int clock = CLOCK_REALTIME;
#endif
  pid_t old_owner = guard_.GetExclusiveOwnerTid();
  guard_.exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
  guard_.recursion_count_ = 0;
  timespec ts;
  InitTimeSpec(true, clock, ms, ns, &ts);
  int rc;
  while ((rc = pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts)) == EINTR) {
    continue;
  }

  if (rc == ETIMEDOUT) {
    timed_out = true;
  } else if (rc != 0) {
    errno = rc;
    PLOG(FATAL) << "TimedWait failed for " << name_;
  }
  guard_.exclusive_owner_.store(old_owner, std::memory_order_relaxed);
#endif
  guard_.recursion_count_ = old_recursion_count;
  return timed_out;
}

}  // namespace art