/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_MUTEX_H_
#define ART_RUNTIME_BASE_MUTEX_H_

#include <pthread.h>
#include <stdint.h>
#include <unistd.h>  // for pid_t

#include <iosfwd>
#include <string>
#include <vector>  // for expected_mutexes_on_weak_ref_access_

#include <android-base/logging.h>

#include "base/aborting.h"
#include "base/atomic.h"
#include "base/globals.h"
#include "base/macros.h"

#if defined(__APPLE__)
#define ART_USE_FUTEXES 0
#else
#define ART_USE_FUTEXES 1
#endif

// Currently Darwin doesn't support locks with timeouts.
#if !defined(__APPLE__)
#define HAVE_TIMED_RWLOCK 1
#else
#define HAVE_TIMED_RWLOCK 0
#endif

namespace art {

class SHARED_LOCKABLE ReaderWriterMutex;
class SHARED_LOCKABLE MutatorMutex;
class ScopedContentionRecorder;
class Thread;
class Mutex;

// LockLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or
// equal level to a lock a thread holds is invalid. The lock hierarchy achieves a cycle-free
// partial ordering and thereby causes deadlock situations to fail checks. See the usage sketch
// after the enum.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
  kLoggingLock = 0,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kNativeDebugInterfaceLock,
  kSignalHandlingLock,
  kJdwpAdbStateLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kMarkSweepMarkStackLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kTaggingLockLevel,
  kTransactionLogLock,
  kJniFunctionTableLock,
  kJniWeakGlobalsLock,
  kJniGlobalsLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kJitDebugInterfaceLock,
  kAllocSpaceLock,
  kBumpPointerSpaceBlockLock,
  kArenaPoolLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kHostDlOpenHandlesLock,
  kVerifierDepsLock,
  kOatFileManagerLock,
  kTracingUniqueMethodsLock,
  kTracingStreamingLock,
  kDeoptimizedMethodsLock,
  kClassLoaderClassesLock,
  kDefaultMutexLevel,
  kDexLock,
  kMarkSweepLargeObjectLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kClassLinkerClassesLock,  // TODO rename.
  kDexToDexCompilerLock,
  kJitCodeCacheLock,
  kCHALock,
  kSubtypeCheckLock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpShutdownLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  kMutatorLock,
  kUserCodeSuspensionLock,
  kInstrumentEntrypointsLock,
  kZygoteCreationLock,

  // The highest valid lock level. Use this if there is code that should only be called with no
  // other locks held. Since this is the highest lock level, we also allow it to be held even if
  // the runtime or current thread is not fully set up yet (for example during thread attach).
  // Note that this lock also has special behavior around the mutator_lock_. Since the
  // mutator_lock_ is not really a 'real' lock, we allow this to be locked when the mutator_lock_
  // is held exclusive. Furthermore, the mutator_lock_ may not be acquired in any form when a lock
  // of this level is held. Since holding the mutator_lock_ exclusively means that all other
  // threads are suspended, this prevents deadlocks while still allowing this lock level to
  // function as a "highest" level.
  kTopLockLevel,

  kLockLevelCount  // Must come last.
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
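
// A minimal sketch of the hierarchy in use (the mutex names below are hypothetical, not locks
// defined by the runtime). While a lock is held, only locks at a strictly lower level may be
// acquired; violations trip the debug-build checks made when a lock is registered as held.
//
//   Mutex outer_lock("outer", kThreadListLock);
//   Mutex inner_lock("inner", kAbortLock);
//   outer_lock.ExclusiveLock(self);    // OK: no locks held yet.
//   inner_lock.ExclusiveLock(self);    // OK: kAbortLock < kThreadListLock.
//   inner_lock.ExclusiveUnlock(self);
//   outer_lock.ExclusiveUnlock(self);
//   // Acquiring inner_lock first and then outer_lock would fail the check.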

const bool kDebugLocking = kIsDebugBuild;

// Record lock contention information, dumpable via SIGQUIT.
#if ART_USE_FUTEXES
// To enable lock contention logging, set this to true.
const bool kLogLockContentions = false;
#else
// Keep this false as lock contention logging is supported only with
// futexes.
const bool kLogLockContentions = false;
#endif
const size_t kContentionLogSize = 4;
const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;

// Base class for all Mutex implementations
class BaseMutex {
 public:
  const char* GetName() const {
    return name_;
  }

  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }
  virtual bool IsMutatorMutex() const { return false; }

  virtual void Dump(std::ostream& os) const = 0;

  static void DumpAll(std::ostream& os);

  bool ShouldRespondToEmptyCheckpointRequest() const {
    return should_respond_to_empty_checkpoint_request_;
  }

  void SetShouldRespondToEmptyCheckpointRequest(bool value) {
    should_respond_to_empty_checkpoint_request_ = value;
  }

  virtual void WakeupToRespondToEmptyCheckpoint() = 0;

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;
  bool should_respond_to_empty_checkpoint_request_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    Atomic<uint64_t> wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};

// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
// State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
// Free      | Exclusive     | error
// Exclusive | Block*        | Free
// * Mutex is not reentrant (unless constructed with recursive == true), so an attempt to
//   ExclusiveLock twice from the same thread will result in an error. Being non-reentrant
//   simplifies Waiting on ConditionVariables.
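//
// A usage sketch (illustrative only; `self` is the calling Thread*). Prefer the scoped
// MutexLock defined later in this file over raw Lock/Unlock calls:
//
//   Mutex lock("example lock");      // level defaults to kDefaultMutexLevel
//   lock.ExclusiveLock(self);        // Free -> Exclusive; blocks while held elsewhere.
//   /* ... critical section ... */
//   lock.ExclusiveUnlock(self);      // Exclusive -> Free.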
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until mutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void Lock(Thread* self) ACQUIRE() {  ExclusiveLock(self); }

  // Returns true if it acquires exclusive access, false otherwise.
  bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true);
  bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void Unlock(Thread* self) RELEASE() {  ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex.
  ALWAYS_INLINE bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  ALWAYS_INLINE void AssertExclusiveHeld(const Thread* self) const ASSERT_CAPABILITY(this);
  ALWAYS_INLINE void AssertHeld(const Thread* self) const ASSERT_CAPABILITY(this);

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) ASSERT_CAPABILITY(!*this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) ASSERT_CAPABILITY(!*this) {
    AssertNotHeldExclusive(self);
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  pid_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked; prefer AssertHeld/AssertNotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const Mutex& operator!() const { return *this; }

  void WakeupToRespondToEmptyCheckpoint() OVERRIDE;

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  Atomic<pid_t> exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  Atomic<pid_t> exclusive_owner_;  // Guarded by mutex_. Asynchronous reads are OK.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};

// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
// Unlike a Mutex, a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
// access to what it guards. A limitation relative to a Mutex is that it cannot be used with a
// condition variable. A ReaderWriterMutex can be in one of three states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread,
// - Shared(n) - shared amongst n threads.
//
// The effect of locking and unlocking operations on the state is:
//
// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
// ----------------------------------------------------------------------------
// Free      | Exclusive     | error           | Shared(1)        | error
// Exclusive | Block         | Free            | Block            | error
// Shared(n) | Block         | error           | Shared(n+1)*     | Shared(n-1) or Free
// * for large values of n the SharedLock may block.
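//
// A usage sketch (illustrative only; `self` is the calling Thread*). The scoped
// ReaderMutexLock/WriterMutexLock forms defined later in this file are preferred:
//
//   ReaderWriterMutex rw_lock("example rw lock");
//   rw_lock.SharedLock(self);        // Free -> Shared(1); concurrent readers OK.
//   /* ... read-only work ... */
//   rw_lock.SharedUnlock(self);      // Shared(1) -> Free.
//
//   rw_lock.ExclusiveLock(self);     // Free -> Exclusive; blocks out readers.
//   /* ... mutation ... */
//   rw_lock.ExclusiveUnlock(self);   // Exclusive -> Free.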
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
 public:
  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
  ~ReaderWriterMutex();

  virtual bool IsReaderWriterMutex() const { return true; }

  // Block until ReaderWriterMutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void WriterLock(Thread* self) ACQUIRE() {  ExclusiveLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void WriterUnlock(Thread* self) RELEASE() {  ExclusiveUnlock(self); }

  // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
  // or false if timeout is reached.
#if HAVE_TIMED_RWLOCK
  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
      EXCLUSIVE_TRYLOCK_FUNCTION(true);
#endif

  // Block until ReaderWriterMutex is shared or free then acquire a share on the access.
  void SharedLock(Thread* self) ACQUIRE_SHARED() ALWAYS_INLINE;
  void ReaderLock(Thread* self) ACQUIRE_SHARED() { SharedLock(self); }

  // Try to acquire a share of the ReaderWriterMutex.
  bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);

  // Release a share of the access.
  void SharedUnlock(Thread* self) RELEASE_SHARED() ALWAYS_INLINE;
  void ReaderUnlock(Thread* self) RELEASE_SHARED() { SharedUnlock(self); }

  // Is the current thread the exclusive holder of the ReaderWriterMutex.
  ALWAYS_INLINE bool IsExclusiveHeld(const Thread* self) const;

  // Assert the current thread has exclusive access to the ReaderWriterMutex.
  ALWAYS_INLINE void AssertExclusiveHeld(const Thread* self) const ASSERT_CAPABILITY(this);
  ALWAYS_INLINE void AssertWriterHeld(const Thread* self) const ASSERT_CAPABILITY(this);

  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
  void AssertNotExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotWriterHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
    AssertNotExclusiveHeld(self);
  }

  // Is the current thread a shared holder of the ReaderWriterMutex.
  bool IsSharedHeld(const Thread* self) const;

  // Assert the current thread has shared access to the ReaderWriterMutex.
  ALWAYS_INLINE void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      // TODO: we can only assert this well when self != null.
      CHECK(IsSharedHeld(self) || self == nullptr) << *this;
    }
  }
  ALWAYS_INLINE void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
    AssertSharedHeld(self);
  }

  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
  // mode.
  ALWAYS_INLINE void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsSharedHeld(self)) << *this;
    }
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner. Returns 0 if the lock is not held. Returns either 0 or -1 if it is held by
  // one or more readers.
  pid_t GetExclusiveOwnerTid() const;

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const ReaderWriterMutex& operator!() const { return *this; }

  void WakeupToRespondToEmptyCheckpoint() OVERRIDE;

 private:
#if ART_USE_FUTEXES
  // Out-of-inline path for handling contention for a SharedLock.
  void HandleSharedLockContention(Thread* self, int32_t cur_state);

  // -1 implies held exclusive; a positive value means shared, held by state_ many owners.
  AtomicInteger state_;
  // Exclusive owner. Modification guarded by this mutex.
  Atomic<pid_t> exclusive_owner_;
  // Number of contenders waiting for a reader share.
  AtomicInteger num_pending_readers_;
  // Number of contenders waiting to be the writer.
  AtomicInteger num_pending_writers_;
#else
  pthread_rwlock_t rwlock_;
  Atomic<pid_t> exclusive_owner_;  // Writes guarded by rwlock_. Asynchronous reads are OK.
#endif
  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};

// MutatorMutex is a special kind of ReaderWriterMutex created specifically for the
// Locks::mutator_lock_ mutex. The behaviour is identical to the ReaderWriterMutex except that
// thread state changes also play a part in lock ownership. The mutator_lock_ will not be truly
// held by any mutator threads. However, a thread in the kRunnable state is considered to have
// shared ownership of the mutator lock and therefore transitions in and out of the kRunnable
// state have associated implications on lock ownership. Extra methods to handle the state
// transitions have been added to the interface but are only accessible to the methods dealing
// with state transitions. The thread state and flags attributes are used to ensure thread state
// transitions are consistent with the permitted behaviour of the mutex.
//
// *) The most important consequence of this behaviour is that all threads must be in one of the
// suspended states before exclusive ownership of the mutator mutex is sought.
//
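// A conceptual sketch (illustrative only): mutator threads never lock the mutator_lock_
// directly; shared ownership is implied by the thread state.
//
//   // Mutator thread: kSuspended -> kRunnable implicitly acquires shared ownership;
//   // kRunnable -> kSuspended releases it (see the private TransitionFrom* methods below).
//   // GC/Debugger thread: acquires it exclusively, which requires all mutator threads
//   // to be in a suspended state first:
//   Locks::mutator_lock_->ExclusiveLock(self);
//   /* ... all mutator threads are suspended ... */
//   Locks::mutator_lock_->ExclusiveUnlock(self);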
std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu);
class SHARED_LOCKABLE MutatorMutex : public ReaderWriterMutex {
 public:
  explicit MutatorMutex(const char* name, LockLevel level = kDefaultMutexLevel)
    : ReaderWriterMutex(name, level) {}
  ~MutatorMutex() {}

  virtual bool IsMutatorMutex() const { return true; }

  // For negative capabilities in clang annotations.
  const MutatorMutex& operator!() const { return *this; }

 private:
  friend class Thread;
  void TransitionFromRunnableToSuspended(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void TransitionFromSuspendedToRunnable(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;

  DISALLOW_COPY_AND_ASSIGN(MutatorMutex);
};

// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
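//
// A usage sketch of the classic guarded wait loop (illustrative only; `self` is the calling
// Thread*). Wait() releases the guard Mutex while blocked and re-acquires it before returning,
// so the predicate must be re-checked in a loop:
//
//   Mutex lock("cv lock");
//   ConditionVariable cv("example cv", lock);
//   bool ready = false;              // guarded by lock
//
//   // Waiting thread:
//   lock.ExclusiveLock(self);
//   while (!ready) {
//     cv.Wait(self);
//   }
//   lock.ExclusiveUnlock(self);
//
//   // Signalling thread:
//   lock.ExclusiveLock(self);
//   ready = true;
//   cv.Signal(self);
//   lock.ExclusiveUnlock(self);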
class ConditionVariable {
 public:
  ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come in to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};

// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction.
class SCOPED_CAPABILITY MutexLock {
 public:
  MutexLock(Thread* self, Mutex& mu) ACQUIRE(mu) : self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~MutexLock() RELEASE() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")
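// For example (with hypothetical `self` and `lock`), the macro leaves correct declarations
// alone but turns the misuse into a compile-time error:
//
//   MutexLock mu(self, lock);  // OK: "MutexLock mu" is not a macro invocation.
//   MutexLock (lock);          // Would silently declare an unlocked variable
//                              // named "MutexLock"; now fails to compile.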

// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
class SCOPED_CAPABILITY ReaderMutexLock {
 public:
  ALWAYS_INLINE ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu);

  ALWAYS_INLINE ~ReaderMutexLock() RELEASE();

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};

// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
// construction and releases it upon destruction.
class SCOPED_CAPABILITY WriterMutexLock {
 public:
  WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~WriterMutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
};
// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
// "WriterMutexLock mu(lock)".
#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")
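// A usage sketch for the scoped forms (illustrative only; `self` is the calling Thread*):
//
//   ReaderWriterMutex rw_lock("example rw lock");
//   {
//     ReaderMutexLock mu(self, rw_lock);   // shared access for this scope
//     /* ... read-only work ... */
//   }                                      // SharedUnlock on scope exit
//   {
//     WriterMutexLock mu(self, rw_lock);   // exclusive access for this scope
//     /* ... mutation ... */
//   }                                      // ExclusiveUnlock on scope exit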

// For StartNoThreadSuspension and EndNoThreadSuspension.
class CAPABILITY("role") Role {
 public:
  void Acquire() ACQUIRE() {}
  void Release() RELEASE() {}
  const Role& operator!() const { return *this; }
};

class Uninterruptible : public Role {
};
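
// A sketch of how a Role interacts with annotalysis (illustrative only; Roles::uninterruptible_
// is declared near the end of this file, and the REQUIRES annotation is assumed from the clang
// thread-safety macros):
//
//   void DoWorkThatMustNotSuspend() REQUIRES(Roles::uninterruptible_);
//
//   Roles::uninterruptible_.Acquire();  // enter a no-thread-suspension region
//   DoWorkThatMustNotSuspend();
//   Roles::uninterruptible_.Release();  // leave the region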

// Global mutexes corresponding to the levels above.
class Locks {
 public:
  static void Init();
  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.

  // Destroying various lock types can emit errors that vary depending upon
  // whether the client (art::Runtime) is currently active.  Allow the client
  // to set a callback that is used to check when it is acceptable to call
  // Abort.  The default behavior is that the client *is not* able to call
  // Abort if no callback is established.
  using ClientCallback = bool();
  static void SetClientCallback(ClientCallback* is_safe_to_call_abort_cb) NO_THREAD_SAFETY_ANALYSIS;
  // Checks for whether it is safe to call Abort() without using locks.
  static bool IsSafeToCallAbortRacy() NO_THREAD_SAFETY_ANALYSIS;

  // Add a mutex to expected_mutexes_on_weak_ref_access_.
  static void AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
  // Remove a mutex from expected_mutexes_on_weak_ref_access_.
  static void RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
  // Check if the given mutex is in expected_mutexes_on_weak_ref_access_.
  static bool IsExpectedOnWeakRefAccess(BaseMutex* mutex);

  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_;

  // Guards code that deals with user-code suspension. This mutex must be held when suspending or
  // resuming threads with SuspendReason::kForUserCode. It may be held by a suspended thread, but
  // only if the suspension is not due to SuspendReason::kForUserCode.
  static Mutex* user_code_suspension_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);

  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When the
  // GC/Debugger thread wants to suspend all mutator threads, it needs to wait for all mutator
  // threads to pass a barrier. Threads that are already suspended will get their barrier passed
  // by the GC/Debugger thread; threads in the runnable state will pass the barrier when they
  // transition to the suspended state. The GC/Debugger thread will be woken up when all mutator
  // threads are suspended.
  //
  // Thread suspension:
  // mutator thread                                | GC/Debugger
  //   .. running ..                               |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block wait for all threads to pass a barrier
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
  // x: Acquire thread_suspend_count_lock_         |   .. running ..
  // while Thread::suspend_count_ > 0              |   .. running ..
  //   - wait on Thread::resume_cond_              |   .. running ..
  //     (releases thread_suspend_count_lock_)     |   .. running ..
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Change to kRunnable                           |  .. running ..
  //  - this uses a CAS operation to ensure the    |  .. running ..
  //    suspend request flag isn't raised as the   |  .. running ..
  //    state is changed                           |  .. running ..
  //  - if the CAS operation fails then goto x     |  .. running ..
  //  .. running ..                                |  .. running ..
  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(user_code_suspension_lock_);

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (i.e., traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // Guards Class Hierarchy Analysis (CHA).
  static Mutex* cha_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // Guard the update of the SubtypeCheck data stores in each Class::status_ field.
  // This lock is used in SubtypeCheck methods which are the interface for
  // any SubtypeCheck-mutating methods.
  // In Class::IsSubClass, the lock is not required since it does not update the SubtypeCheck data.
  static Mutex* subtype_check_lock_ ACQUIRED_AFTER(cha_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(subtype_check_lock_);

  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::classlinker_classes_lock_)
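
  // For example, a hypothetical mutex declared elsewhere in the runtime:
  //
  //   Mutex my_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;  // checked against the hierarchy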

  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guard the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  static ReaderWriterMutex* dex_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards opened oat files in OatFileManager.
  static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(dex_lock_);

  // Guards extra string entries for VerifierDeps.
  static ReaderWriterMutex* verifier_deps_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);

  // Guards dlopen_handles_ in DlOpenOatFile.
  static Mutex* host_dlopen_handles_lock_ ACQUIRED_AFTER(verifier_deps_lock_);

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(host_dlopen_handles_lock_);

  // Guards reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Guard accesses to the JNI Global Reference table.
  static ReaderWriterMutex* jni_globals_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Guard accesses to the JNI Weak Global Reference table.
  static Mutex* jni_weak_globals_lock_ ACQUIRED_AFTER(jni_globals_lock_);

  // Guard accesses to the JNI function table override.
  static Mutex* jni_function_table_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(jni_function_table_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Guards the magic global variables used by native tools (e.g. libunwind).
  static Mutex* native_debug_interface_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Have an exclusive logging thread.
  static Mutex* logging_lock_ ACQUIRED_AFTER(native_debug_interface_lock_);

  // List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
  // avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
  // encounter an unexpected mutex on accessing weak refs,
  // Thread::CheckEmptyCheckpointFromWeakRefAccess will detect it.
  static std::vector<BaseMutex*> expected_mutexes_on_weak_ref_access_;
  static Atomic<const BaseMutex*> expected_mutexes_on_weak_ref_access_guard_;
  class ScopedExpectedMutexesOnWeakRefAccessLock;
};

class Roles {
 public:
  // Uninterruptible means that the thread may not become suspended.
  static Uninterruptible uninterruptible_;
};

}  // namespace art

#endif  // ART_RUNTIME_BASE_MUTEX_H_