/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mutex.h"

#include <errno.h>
#include <sys/time.h>

#include <sstream>

#include "android-base/stringprintf.h"

#include "base/atomic.h"
#include "base/logging.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/value_object.h"
#include "mutex-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"

namespace art {

using android::base::StringPrintf;

struct AllMutexData {
  // A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
  Atomic<const BaseMutex*> all_mutexes_guard;
  // All created mutexes, guarded by all_mutexes_guard.
  std::set<BaseMutex*>* all_mutexes;
  AllMutexData() : all_mutexes(nullptr) {}
};
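// Note: this file only ever accesses element 0 (the array decays to a pointer in the
// gAllMutexData-> expressions below). The kAllMutexDataSize sizing is presumably there to pad
// the guard word away from unrelated data.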
static struct AllMutexData gAllMutexData[kAllMutexDataSize];

#if ART_USE_FUTEXES
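// Computes *result_ts = lhs - rhs, normalizing tv_nsec into [0, one second). Returns true if
// the result is negative, i.e. lhs is earlier than rhs (the deadline has already passed).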
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
  const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
  result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
  result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
  if (result_ts->tv_nsec < 0) {
    result_ts->tv_sec--;
    result_ts->tv_nsec += one_sec;
  } else if (result_ts->tv_nsec > one_sec) {
    result_ts->tv_sec++;
    result_ts->tv_nsec -= one_sec;
  }
  return result_ts->tv_sec < 0;
}
#endif

#if ART_USE_FUTEXES
// If we wake up from a futex wake, and the runtime disappeared while we were asleep,
// it's important to stop in our tracks before we touch deallocated memory.
static inline void SleepIfRuntimeDeleted(Thread* self) {
  if (self != nullptr) {
    JNIEnvExt* const env = self->GetJniEnv();
    if (UNLIKELY(env != nullptr && env->IsRuntimeDeleted())) {
      DCHECK(self->IsDaemon());
      // If the runtime has been deleted, then we cannot proceed. Just sleep forever. This may
      // occur for user daemon threads that get a spurious wakeup. This occurs for test 132 with
      // --host and --gdb.
      // After we wake up, the runtime may have been shut down, which means that this condition
      // variable may have been deleted. It is not safe to retry the wait.
      SleepForever();
    }
  }
}
#else
// We should be doing this for pthreads too, but it seems to be impossible for something
// like a condition variable wait. Thus we don't bother trying.
#endif

// Wait for an amount of time that roughly increases in the argument i.
// Spin for small arguments and yield/sleep for longer ones.
static void BackOff(uint32_t i) {
  static constexpr uint32_t kSpinMax = 10;
  static constexpr uint32_t kYieldMax = 20;
  if (i <= kSpinMax) {
    // TODO: Esp. in very latency-sensitive cases, consider replacing this with an explicit
    // test-and-test-and-set loop in the caller. Possibly skip entirely on a uniprocessor.
    volatile uint32_t x = 0;
    const uint32_t spin_count = 10 * i;
    for (uint32_t spin = 0; spin < spin_count; ++spin) {
      ++x;  // Volatile; hence should not be optimized away.
    }
    // TODO: Consider adding x86 PAUSE and/or ARM YIELD here.
  } else if (i <= kYieldMax) {
    sched_yield();
  } else {
    NanoSleep(1000ull * (i - kYieldMax));
  }
}

// Wait until pred(testLoc->load(std::memory_order_relaxed)) holds, or until a
// short time interval, on the order of kernel context-switch time, passes.
// Return true if the predicate test succeeded, false if we timed out.
template<typename Pred>
static inline bool WaitBrieflyFor(AtomicInteger* testLoc, Thread* self, Pred pred) {
  // TODO: Tune these parameters correctly. BackOff(3) should take on the order of 100 cycles. So
  // this should result in retrying <= 10 times, usually waiting around 100 cycles each. The
  // maximum delay should be significantly less than the expected futex() context switch time, so
  // there should be little danger of this worsening things appreciably. If the lock was only
  // held briefly by a running thread, this should help immensely.
  static constexpr uint32_t kMaxBackOff = 3;  // Should probably be <= kSpinMax above.
  static constexpr uint32_t kMaxIters = 50;
  JNIEnvExt* const env = self == nullptr ? nullptr : self->GetJniEnv();
  for (uint32_t i = 1; i <= kMaxIters; ++i) {
    BackOff(std::min(i, kMaxBackOff));
    if (pred(testLoc->load(std::memory_order_relaxed))) {
      return true;
    }
    if (UNLIKELY(env != nullptr && env->IsRuntimeDeleted())) {
      // This returns true once we've started shutting down. We then try to reach a quiescent
      // state as soon as possible to avoid touching data that may be deallocated by the shutdown
      // process. It currently relies on a timeout.
      return false;
    }
  }
  return false;
}

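// Scoped spin lock guarding the global set of all mutexes: acquires by CASing
// all_mutexes_guard from nullptr to the mutex being registered, backing off between
// attempts, and releases by storing nullptr again.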
class ScopedAllMutexesLock final {
 public:
  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
    for (uint32_t i = 0;
         !gAllMutexData->all_mutexes_guard.CompareAndSetWeakAcquire(nullptr, mutex);
         ++i) {
      BackOff(i);
    }
  }

  ~ScopedAllMutexesLock() {
    DCHECK_EQ(gAllMutexData->all_mutexes_guard.load(std::memory_order_relaxed), mutex_);
    gAllMutexData->all_mutexes_guard.store(nullptr, std::memory_order_release);
  }

 private:
  const BaseMutex* const mutex_;
};

// Scoped class that generates events at the beginning and end of lock contention.
class ScopedContentionRecorder final : public ValueObject {
 public:
  ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
      : mutex_(kLogLockContentions ? mutex : nullptr),
        blocked_tid_(kLogLockContentions ? blocked_tid : 0),
        owner_tid_(kLogLockContentions ? owner_tid : 0),
        start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
    if (ATraceEnabled()) {
      std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
                                     mutex->GetName(), owner_tid);
      ATraceBegin(msg.c_str());
    }
  }

  ~ScopedContentionRecorder() {
    ATraceEnd();
    if (kLogLockContentions) {
      uint64_t end_nano_time = NanoTime();
      mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_);
    }
  }

 private:
  BaseMutex* const mutex_;
  const uint64_t blocked_tid_;
  const uint64_t owner_tid_;
  const uint64_t start_nano_time_;
};

BaseMutex::BaseMutex(const char* name, LockLevel level)
    : name_(name),
      level_(level),
      should_respond_to_empty_checkpoint_request_(false) {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
    if (*all_mutexes_ptr == nullptr) {
      // We leak the global set of all mutexes to avoid ordering issues in global variable
      // construction/destruction.
      *all_mutexes_ptr = new std::set<BaseMutex*>();
    }
    (*all_mutexes_ptr)->insert(this);
  }
}

BaseMutex::~BaseMutex() {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    gAllMutexData->all_mutexes->erase(this);
  }
}

void BaseMutex::DumpAll(std::ostream& os) {
  if (kLogLockContentions) {
    os << "Mutex logging:\n";
    ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
    std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
    if (all_mutexes == nullptr) {
      // No mutexes have been created yet at startup.
      return;
    }
    os << "(Contended)\n";
    for (const BaseMutex* mutex : *all_mutexes) {
      if (mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
    os << "(Never contended)\n";
    for (const BaseMutex* mutex : *all_mutexes) {
      if (!mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
  }
}

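// Verifies that it is safe for the calling thread to wait on this mutex (or on the monitor
// lock): the mutex itself must be held, and in debug builds no other mutex may be held, with a
// special exception for user_code_suspension_lock_ described below.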
void BaseMutex::CheckSafeToWait(Thread* self) {
  if (self == nullptr) {
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
        << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    std::string error_msg;
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      if (i != level_) {
        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
        // We allow the thread to wait even if the user_code_suspension_lock_ is held. This just
        // means that gc or some other internal process is suspending the thread while it is
        // trying to suspend some other thread. So long as the current thread is not being
        // suspended by a SuspendReason::kForUserCode (which needs the user_code_suspension_lock_
        // to clear) this is fine. This is needed due to user_code_suspension_lock_ being the way
        // untrusted code interacts with suspension. One holds the lock to prevent
        // user-code-suspension from occurring. Since this is only initiated from user-supplied
        // native-code this is safe.
        if (held_mutex == Locks::user_code_suspension_lock_) {
          // No thread safety analysis is fine since we have both the user_code_suspension_lock_
          // from the line above and the ThreadSuspendCountLock since it is our level_. We use this
          // lambda to avoid having to annotate the whole function as NO_THREAD_SAFETY_ANALYSIS.
          auto is_suspending_for_user_code = [self]() NO_THREAD_SAFETY_ANALYSIS {
            return self->GetUserCodeSuspendCount() != 0;
          };
          if (is_suspending_for_user_code()) {
            std::ostringstream oss;
            oss << "Holding \"" << held_mutex->name_ << "\" "
                << "(level " << LockLevel(i) << ") while performing wait on "
                << "\"" << name_ << "\" (level " << level_ << ") "
                << "with SuspendReason::kForUserCode pending suspensions";
            error_msg = oss.str();
            LOG(ERROR) << error_msg;
            bad_mutexes_held = true;
          }
        } else if (held_mutex != nullptr) {
          std::ostringstream oss;
          oss << "Holding \"" << held_mutex->name_ << "\" "
              << "(level " << LockLevel(i) << ") while performing wait on "
              << "\"" << name_ << "\" (level " << level_ << ")";
          error_msg = oss.str();
          LOG(ERROR) << error_msg;
          bad_mutexes_held = true;
        }
      }
    }
    if (gAborting == 0) {  // Avoid recursive aborts.
      CHECK(!bad_mutexes_held) << error_msg;
    }
  }
}

void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
  if (kLogLockContentions) {
    // Atomically add value to wait_time.
    wait_time.fetch_add(value, std::memory_order_seq_cst);
  }
}

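// Records a single contention event in the fixed-size ring buffer of contention log entries.
// Called from ~ScopedContentionRecorder once the blocked thread finally acquires the lock.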
void BaseMutex::RecordContention(uint64_t blocked_tid,
                                 uint64_t owner_tid,
                                 uint64_t nano_time_blocked) {
  if (kLogLockContentions) {
    ContentionLogData* data = contention_log_data_;
    ++(data->contention_count);
    data->AddToWaitTime(nano_time_blocked);
    ContentionLogEntry* log = data->contention_log;
    // This code is intentionally racy as it is only used for diagnostics.
    int32_t slot = data->cur_content_log_entry.load(std::memory_order_relaxed);
    if (log[slot].blocked_tid == blocked_tid &&
        log[slot].owner_tid == owner_tid) {
      ++log[slot].count;
    } else {
      uint32_t new_slot;
      do {
        slot = data->cur_content_log_entry.load(std::memory_order_relaxed);
        new_slot = (slot + 1) % kContentionLogSize;
      } while (!data->cur_content_log_entry.CompareAndSetWeakRelaxed(slot, new_slot));
      log[new_slot].blocked_tid = blocked_tid;
      log[new_slot].owner_tid = owner_tid;
      log[new_slot].count.store(1, std::memory_order_relaxed);
    }
  }
}

void BaseMutex::DumpContention(std::ostream& os) const {
  if (kLogLockContentions) {
    const ContentionLogData* data = contention_log_data_;
    const ContentionLogEntry* log = data->contention_log;
    uint64_t wait_time = data->wait_time.load(std::memory_order_relaxed);
    uint32_t contention_count = data->contention_count.load(std::memory_order_relaxed);
    if (contention_count == 0) {
      os << "never contended";
    } else {
      os << "contended " << contention_count
         << " total wait of contender " << PrettyDuration(wait_time)
         << " average " << PrettyDuration(wait_time / contention_count);
      SafeMap<uint64_t, size_t> most_common_blocker;
      SafeMap<uint64_t, size_t> most_common_blocked;
      for (size_t i = 0; i < kContentionLogSize; ++i) {
        uint64_t blocked_tid = log[i].blocked_tid;
        uint64_t owner_tid = log[i].owner_tid;
        uint32_t count = log[i].count.load(std::memory_order_relaxed);
        if (count > 0) {
          auto it = most_common_blocked.find(blocked_tid);
          if (it != most_common_blocked.end()) {
            most_common_blocked.Overwrite(blocked_tid, it->second + count);
          } else {
            most_common_blocked.Put(blocked_tid, count);
          }
          it = most_common_blocker.find(owner_tid);
          if (it != most_common_blocker.end()) {
            most_common_blocker.Overwrite(owner_tid, it->second + count);
          } else {
            most_common_blocker.Put(owner_tid, count);
          }
        }
      }
      uint64_t max_tid = 0;
      size_t max_tid_count = 0;
      for (const auto& pair : most_common_blocked) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows most blocked tid=" << max_tid;
      }
      max_tid = 0;
      max_tid_count = 0;
      for (const auto& pair : most_common_blocker) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows tid=" << max_tid << " owning during this time";
      }
    }
  }
}


Mutex::Mutex(const char* name, LockLevel level, bool recursive)
    : BaseMutex(name, level), exclusive_owner_(0), recursion_count_(0), recursive_(recursive) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, state_and_contenders_.load(std::memory_order_relaxed));
#else
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
#endif
}

// Helper to allow checking shutdown while locking for thread safety.
static bool IsSafeToCallAbortSafe() {
  MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
  return Locks::IsSafeToCallAbortRacy();
}

Mutex::~Mutex() {
  bool safe_to_call_abort = Locks::IsSafeToCallAbortRacy();
#if ART_USE_FUTEXES
  if (state_and_contenders_.load(std::memory_order_relaxed) != 0) {
    LOG(safe_to_call_abort ? FATAL : WARNING)
        << "destroying mutex with owner or contenders. Owner:" << GetExclusiveOwnerTid();
  } else {
    if (GetExclusiveOwnerTid() != 0) {
      LOG(safe_to_call_abort ? FATAL : WARNING)
          << "unexpectedly found an owner on unlocked mutex " << name_;
    }
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_mutex_destroy(&mutex_);
  if (rc != 0) {
    errno = rc;
    PLOG(safe_to_call_abort ? FATAL : WARNING)
        << "pthread_mutex_destroy failed for " << name_;
  }
#endif
}

void Mutex::ExclusiveLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_and_contenders_.load(std::memory_order_relaxed);
      if (LIKELY((cur_state & kHeldMask) == 0) /* lock not held */) {
        done = state_and_contenders_.CompareAndSetWeakAcquire(cur_state, cur_state | kHeldMask);
      } else {
        // Failed to acquire, hang up.
        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
        // Empirically, it appears important to spin again each time through the loop; if we
        // bother to go to sleep and wake up, we should be fairly persistent in trying for the
        // lock.
        if (!WaitBrieflyFor(&state_and_contenders_, self,
                            [](int32_t v) { return (v & kHeldMask) == 0; })) {
          // Increment contender count. We can't create enough threads for this to overflow.
          increment_contenders();
          // Make cur_state again reflect the expected value of state_and_contenders.
          cur_state += kContenderIncrement;
          if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
            self->CheckEmptyCheckpointFromMutex();
          }
          do {
            if (futex(state_and_contenders_.Address(), FUTEX_WAIT_PRIVATE, cur_state,
                      nullptr, nullptr, 0) != 0) {
              // We only went to sleep after incrementing contenders and checking that the
              // lock is still held by someone else. EAGAIN and EINTR both indicate a spurious
              // failure, try again from the beginning. We don't use TEMP_FAILURE_RETRY so we can
              // intentionally retry to acquire the lock.
              if ((errno != EAGAIN) && (errno != EINTR)) {
                PLOG(FATAL) << "futex wait failed for " << name_;
              }
            }
            SleepIfRuntimeDeleted(self);
            // Retry until not held. In heavy contention situations we otherwise get redundant
            // futex wakeups as a result of repeatedly decrementing and incrementing contenders.
            cur_state = state_and_contenders_.load(std::memory_order_relaxed);
          } while ((cur_state & kHeldMask) != 0);
          decrement_contenders();
        }
      }
    } while (!done);
    // Confirm that lock is now held.
    DCHECK_NE(state_and_contenders_.load(std::memory_order_relaxed) & kHeldMask, 0);
#else
    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
    DCHECK_EQ(GetExclusiveOwnerTid(), 0) << " my tid = " << SafeGetTid(self)
                                         << " recursive_ = " << recursive_;
    exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
                                               << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
}

bool Mutex::ExclusiveTryLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_and_contenders_.load(std::memory_order_relaxed);
      if ((cur_state & kHeldMask) == 0) {
        // Change state to held and impose load/store ordering appropriate for lock acquisition.
        done = state_and_contenders_.CompareAndSetWeakAcquire(cur_state, cur_state | kHeldMask);
      } else {
        return false;
      }
    } while (!done);
    DCHECK_NE(state_and_contenders_.load(std::memory_order_relaxed) & kHeldMask, 0);
#else
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) {
      return false;
    }
    if (result != 0) {
      errno = result;
      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
    }
#endif
    DCHECK_EQ(GetExclusiveOwnerTid(), 0);
    exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
                                               << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
  return true;
}

bool Mutex::ExclusiveTryLockWithSpinning(Thread* self) {
  // Spin a small number of times, since this affects our ability to respond to suspension
  // requests. We spin repeatedly only if the mutex repeatedly becomes available and unavailable
  // in rapid succession, and then we will typically not spin for the maximal period.
  const int kMaxSpins = 5;
  for (int i = 0; i < kMaxSpins; ++i) {
    if (ExclusiveTryLock(self)) {
      return true;
    }
#if ART_USE_FUTEXES
    if (!WaitBrieflyFor(&state_and_contenders_, self,
                        [](int32_t v) { return (v & kHeldMask) == 0; })) {
      return false;
    }
#endif
  }
  return ExclusiveTryLock(self);
}

#if ART_USE_FUTEXES
void Mutex::ExclusiveLockUncontendedFor(Thread* new_owner) {
  DCHECK_EQ(level_, kMonitorLock);
  DCHECK(!recursive_);
  state_and_contenders_.store(kHeldMask, std::memory_order_relaxed);
  recursion_count_ = 1;
  exclusive_owner_.store(SafeGetTid(new_owner), std::memory_order_relaxed);
  // Don't call RegisterAsLocked(). It wouldn't register anything anyway. And
  // this happens as we're inflating a monitor, which doesn't logically affect
  // held "locks"; it effectively just converts a thin lock to a mutex. By doing
  // this while the lock is already held, we're delaying the acquisition of a
  // logically held mutex, which can introduce bogus lock order violations.
}

void Mutex::ExclusiveUnlockUncontended() {
  DCHECK_EQ(level_, kMonitorLock);
  state_and_contenders_.store(0, std::memory_order_relaxed);
  recursion_count_ = 0;
  exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
  // Skip RegisterAsUnlocked(), which wouldn't do anything anyway.
}
#endif  // ART_USE_FUTEXES

void Mutex::ExclusiveUnlock(Thread* self) {
  if (kIsDebugBuild && self != nullptr && self != Thread::Current()) {
    std::string name1 = "<null>";
    std::string name2 = "<null>";
    if (self != nullptr) {
      self->GetThreadName(name1);
    }
    if (Thread::Current() != nullptr) {
      Thread::Current()->GetThreadName(name2);
    }
    LOG(FATAL) << GetName() << " level=" << level_ << " self=" << name1
               << " Thread::Current()=" << name2;
  }
  AssertHeld(self);
  DCHECK_NE(GetExclusiveOwnerTid(), 0);
  recursion_count_--;
  if (!recursive_ || recursion_count_ == 0) {
    if (kDebugLocking) {
      CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
                                                 << name_ << " " << recursion_count_;
    }
    RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_and_contenders_.load(std::memory_order_relaxed);
      if (LIKELY((cur_state & kHeldMask) != 0)) {
        // We're no longer the owner.
        exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
        // Change state to not held and impose load/store ordering appropriate for lock release.
        uint32_t new_state = cur_state & ~kHeldMask;  // Same number of contenders.
        done = state_and_contenders_.CompareAndSetWeakRelease(cur_state, new_state);
        if (LIKELY(done)) {  // Spurious fail or waiters changed?
          if (UNLIKELY(new_state != 0) /* have contenders */) {
            futex(state_and_contenders_.Address(), FUTEX_WAKE_PRIVATE, kWakeOne,
                  nullptr, nullptr, 0);
          }
          // Waiters only go to sleep after incrementing contenders and verifying the lock is
          // still held. If we didn't see waiters, then there couldn't have been any futexes
          // waiting on this lock when we did the CAS. New arrivals after that cannot wait for us,
          // since the futex wait call would see the lock available and immediately return.
        }
      } else {
        // Logging acquires the logging lock, avoid infinite recursion in that case.
        if (this != Locks::logging_lock_) {
          LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
        } else {
          LogHelper::LogLineLowStack(__FILE__,
                                     __LINE__,
                                     ::android::base::FATAL_WITHOUT_ABORT,
                                     StringPrintf("Unexpected state_ %d in unlock for %s",
                                                  cur_state, name_).c_str());
          _exit(1);
        }
      }
    } while (!done);
#else
    exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
    CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
#endif
  }
}

void Mutex::Dump(std::ostream& os) const {
  os << (recursive_ ? "recursive " : "non-recursive ")
     << name_
     << " level=" << static_cast<int>(level_)
     << " rec=" << recursion_count_
     << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
  mu.Dump(os);
  return os;
}

void Mutex::WakeupToRespondToEmptyCheckpoint() {
#if ART_USE_FUTEXES
  // Wake up all the waiters so they will respond to the empty checkpoint.
  DCHECK(should_respond_to_empty_checkpoint_request_);
  if (UNLIKELY(get_contenders() != 0)) {
    futex(state_and_contenders_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);
  }
#else
  LOG(FATAL) << "Non futex case isn't supported.";
#endif
}

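// A minimal Mutex usage sketch (illustrative only; kExampleLock stands in for a real LockLevel
// registered in the lock ordering):
//
//   Mutex lock("example lock", kExampleLock);
//   {
//     MutexLock mu(Thread::Current(), lock);  // Scoped ExclusiveLock()/ExclusiveUnlock().
//     // ... critical section ...
//   }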
ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
    : BaseMutex(name, level)
#if ART_USE_FUTEXES
    , state_(0), exclusive_owner_(0), num_contenders_(0)
#endif
{
#if !ART_USE_FUTEXES
  CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr));
#endif
}

ReaderWriterMutex::~ReaderWriterMutex() {
#if ART_USE_FUTEXES
  CHECK_EQ(state_.load(std::memory_order_relaxed), 0);
  CHECK_EQ(GetExclusiveOwnerTid(), 0);
  CHECK_EQ(num_contenders_.load(std::memory_order_relaxed), 0);
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_rwlock_destroy(&rwlock_);
  if (rc != 0) {
    errno = rc;
    bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
    PLOG(is_safe_to_call_abort ? FATAL : WARNING) << "pthread_rwlock_destroy failed for " << name_;
  }
#endif
}

void ReaderWriterMutex::ExclusiveLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (LIKELY(cur_state == 0)) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      if (!WaitBrieflyFor(&state_, self, [](int32_t v) { return v == 0; })) {
        num_contenders_.fetch_add(1);
        if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
          self->CheckEmptyCheckpointFromMutex();
        }
        if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
          if ((errno != EAGAIN) && (errno != EINTR)) {
            PLOG(FATAL) << "futex wait failed for " << name_;
          }
        }
        SleepIfRuntimeDeleted(self);
        num_contenders_.fetch_sub(1);
      }
    }
  } while (!done);
  DCHECK_EQ(state_.load(std::memory_order_relaxed), -1);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
#endif
  DCHECK_EQ(GetExclusiveOwnerTid(), 0);
  exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
}

void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  AssertExclusiveHeld(self);
  RegisterAsUnlocked(self);
  DCHECK_NE(GetExclusiveOwnerTid(), 0);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (LIKELY(cur_state == -1)) {
      // We're no longer the owner.
      exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
      // Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
      // Note, the num_contenders_ load below mustn't reorder before the CompareAndSet.
      done = state_.CompareAndSetWeakSequentiallyConsistent(-1 /* cur_state */, 0 /* new state */);
      if (LIKELY(done)) {  // Weak CAS may fail spuriously.
        // Wake any waiters.
        if (UNLIKELY(num_contenders_.load(std::memory_order_seq_cst) > 0)) {
          futex(state_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
    }
  } while (!done);
#else
  exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}

#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  timespec end_abs_ts;
  InitTimeSpec(true, CLOCK_MONOTONIC, ms, ns, &end_abs_ts);
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (cur_state == 0) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      timespec now_abs_ts;
      InitTimeSpec(true, CLOCK_MONOTONIC, 0, 0, &now_abs_ts);
      timespec rel_ts;
      if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
        return false;  // Timed out.
      }
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      if (!WaitBrieflyFor(&state_, self, [](int32_t v) { return v == 0; })) {
        num_contenders_.fetch_add(1);
        if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
          self->CheckEmptyCheckpointFromMutex();
        }
        if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, &rel_ts, nullptr, 0) != 0) {
          if (errno == ETIMEDOUT) {
            num_contenders_.fetch_sub(1);
            return false;  // Timed out.
          } else if ((errno != EAGAIN) && (errno != EINTR)) {
            // EAGAIN and EINTR both indicate a spurious failure,
            // recompute the relative timeout from now and try again.
            // We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts.
            PLOG(FATAL) << "timed futex wait failed for " << name_;
          }
        }
        SleepIfRuntimeDeleted(self);
        num_contenders_.fetch_sub(1);
      }
    }
  } while (!done);
#else
  timespec ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
  int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
  if (result == ETIMEDOUT) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
  }
#endif
  exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}
#endif

#if ART_USE_FUTEXES
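// Slow path for SharedLock(): a writer currently holds the lock (state_ < 0), so briefly
// spin-wait for it to be released, then block on the futex until state_ changes.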
void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_state) {
  // Owner holds it exclusively, hang up.
  ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
  if (!WaitBrieflyFor(&state_, self, [](int32_t v) { return v >= 0; })) {
    num_contenders_.fetch_add(1);
    if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
      self->CheckEmptyCheckpointFromMutex();
    }
    if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
      if (errno != EAGAIN && errno != EINTR) {
        PLOG(FATAL) << "futex wait failed for " << name_;
      }
    }
    SleepIfRuntimeDeleted(self);
    num_contenders_.fetch_sub(1);
  }
}
#endif

bool ReaderWriterMutex::SharedTryLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (cur_state >= 0) {
      // Add as an extra reader and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareAndSetWeakAcquire(cur_state, cur_state + 1);
    } else {
      // Owner holds it exclusively.
      return false;
    }
  } while (!done);
#else
  int result = pthread_rwlock_tryrdlock(&rwlock_);
  if (result == EBUSY) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_tryrdlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}

bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
  DCHECK(self == nullptr || self == Thread::Current());
  bool result;
  if (UNLIKELY(self == nullptr)) {  // Handle unattached threads.
    result = IsExclusiveHeld(self);  // TODO: a better best effort here.
  } else {
    result = (self->GetHeldMutex(level_) == this);
  }
  return result;
}

void ReaderWriterMutex::Dump(std::ostream& os) const {
  os << name_
     << " level=" << static_cast<int>(level_)
     << " owner=" << GetExclusiveOwnerTid()
#if ART_USE_FUTEXES
     << " state=" << state_.load(std::memory_order_seq_cst)
     << " num_contenders=" << num_contenders_.load(std::memory_order_seq_cst)
#endif
     << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
  mu.Dump(os);
  return os;
}

std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu) {
  mu.Dump(os);
  return os;
}

void ReaderWriterMutex::WakeupToRespondToEmptyCheckpoint() {
#if ART_USE_FUTEXES
  // Wake up all the waiters so they will respond to the empty checkpoint.
  DCHECK(should_respond_to_empty_checkpoint_request_);
  if (UNLIKELY(num_contenders_.load(std::memory_order_relaxed) > 0)) {
    futex(state_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);
  }
#else
  LOG(FATAL) << "Non futex case isn't supported.";
#endif
}

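// A minimal ReaderWriterMutex usage sketch (illustrative only; kExampleLock stands in for a
// real LockLevel):
//
//   ReaderWriterMutex rw_lock("example rw lock", kExampleLock);
//   {
//     ReaderMutexLock mu(Thread::Current(), rw_lock);  // Scoped SharedLock()/SharedUnlock().
//     // ... read-side critical section; multiple readers may hold this concurrently ...
//   }
//   {
//     WriterMutexLock mu(Thread::Current(), rw_lock);  // Scoped ExclusiveLock()/Unlock().
//     // ... write-side critical section ...
//   }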
ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
    : name_(name), guard_(guard) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, sequence_.load(std::memory_order_relaxed));
  num_waiters_ = 0;
#else
  pthread_condattr_t cond_attrs;
  CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
#if !defined(__APPLE__)
  // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
  CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
#endif
  CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
#endif
}

ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
  if (num_waiters_ != 0) {
    bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
    LOG(is_safe_to_call_abort ? FATAL : WARNING)
        << "ConditionVariable::~ConditionVariable for " << name_
        << " called with " << num_waiters_ << " waiters.";
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using condition variables.
  int rc = pthread_cond_destroy(&cond_);
  if (rc != 0) {
    errno = rc;
    bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
    PLOG(is_safe_to_call_abort ? FATAL : WARNING) << "pthread_cond_destroy failed for " << name_;
  }
#endif
}

void ConditionVariable::Broadcast(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  // TODO: enable below, there's a race in thread creation that causes false failures currently.
  // guard_.AssertExclusiveHeld(self);
  DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
#if ART_USE_FUTEXES
  RequeueWaiters(std::numeric_limits<int32_t>::max());
#else
  CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
#endif
}

#if ART_USE_FUTEXES
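// Note on the futex call below: FUTEX_REQUEUE wakes up to val waiters (here 0) and requeues up
// to val2 of the remaining ones onto the target futex. The raw futex syscall passes val2 in the
// timespec argument slot, hence the reinterpret_cast of count; see futex(2).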
void ConditionVariable::RequeueWaiters(int32_t count) {
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate a signal occurred.
    // Move waiters from the condition variable's futex to the guard's futex,
    // so that they will be woken up when the mutex is released.
    bool done = futex(sequence_.Address(),
                      FUTEX_REQUEUE_PRIVATE,
                      /* Threads to wake */ 0,
                      /* Threads to requeue */ reinterpret_cast<const timespec*>(count),
                      guard_.state_and_contenders_.Address(),
                      0) != -1;
    if (!done && errno != EAGAIN && errno != EINTR) {
      PLOG(FATAL) << "futex requeue failed for " << name_;
    }
  }
}
#endif


void ConditionVariable::Signal(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
  RequeueWaiters(1);
#else
  CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
#endif
}

void ConditionVariable::Wait(Thread* self) {
  guard_.CheckSafeToWait(self);
  WaitHoldingLocks(self);
}

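// Like Wait(), but skips the CheckSafeToWait() sanity check; callers are responsible for
// knowing that the locks they hold make it safe to block here.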
void ConditionVariable::WaitHoldingLocks(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.increment_contenders();
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT_PRIVATE, cur_sequence, nullptr, nullptr, 0) != 0) {
    // Futex failed; check that it is an expected error.
    // EAGAIN == EWOULDBLOCK, so we let the caller try again.
    // EINTR implies a signal was sent to this thread.
    if ((errno != EINTR) && (errno != EAGAIN)) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  SleepIfRuntimeDeleted(self);
  guard_.ExclusiveLock(self);
  CHECK_GT(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GT(guard_.get_contenders(), 0);
  guard_.decrement_contenders();
#else
  pid_t old_owner = guard_.GetExclusiveOwnerTid();
  guard_.exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
  guard_.recursion_count_ = 0;
  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
  guard_.exclusive_owner_.store(old_owner, std::memory_order_relaxed);
#endif
  guard_.recursion_count_ = old_recursion_count;
}

bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == nullptr || self == Thread::Current());
  bool timed_out = false;
  guard_.AssertExclusiveHeld(self);
  guard_.CheckSafeToWait(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  timespec rel_ts;
  InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.increment_contenders();
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT_PRIVATE, cur_sequence, &rel_ts, nullptr, 0) != 0) {
    if (errno == ETIMEDOUT) {
      // Timed out, we're done.
      timed_out = true;
    } else if ((errno == EAGAIN) || (errno == EINTR)) {
      // A signal or ConditionVariable::Signal/Broadcast has come in.
    } else {
      PLOG(FATAL) << "timed futex wait failed for " << name_;
    }
  }
  SleepIfRuntimeDeleted(self);
  guard_.ExclusiveLock(self);
  CHECK_GT(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GT(guard_.get_contenders(), 0);
  guard_.decrement_contenders();
#else
#if !defined(__APPLE__)
  int clock = CLOCK_MONOTONIC;
#else
  int clock = CLOCK_REALTIME;
#endif
  pid_t old_owner = guard_.GetExclusiveOwnerTid();
  guard_.exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
  guard_.recursion_count_ = 0;
  timespec ts;
  InitTimeSpec(true, clock, ms, ns, &ts);
  int rc;
  while ((rc = pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts)) == EINTR) {
    continue;
  }
  if (rc == ETIMEDOUT) {
    timed_out = true;
  } else if (rc != 0) {
    errno = rc;
    PLOG(FATAL) << "TimedWait failed for " << name_;
  }
  guard_.exclusive_owner_.store(old_owner, std::memory_order_relaxed);
#endif
  guard_.recursion_count_ = old_recursion_count;
  return timed_out;
}

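// Typical wait pattern (sketch; the guard must be held, and the predicate must be re-checked
// after every return since futex waits and requeues allow spurious wakeups):
//
//   MutexLock mu(self, guard);
//   while (!ready) {
//     cond.Wait(self);  // Atomically releases guard, blocks, and reacquires guard.
//   }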
}  // namespace art