/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_MUTEX_H_
#define ART_RUNTIME_BASE_MUTEX_H_

#include <pthread.h>
#include <stdint.h>

#include <iosfwd>
#include <string>

#include "atomic.h"
#include "base/logging.h"
#include "base/macros.h"
#include "globals.h"

#if defined(__APPLE__)
#define ART_USE_FUTEXES 0
#else
#define ART_USE_FUTEXES 1
#endif

// Currently Darwin doesn't support locks with timeouts.
#if !defined(__APPLE__)
#define HAVE_TIMED_RWLOCK 1
#else
#define HAVE_TIMED_RWLOCK 0
#endif

namespace art {

class LOCKABLE Mutex;
class SHARED_LOCKABLE ReaderWriterMutex;
class SHARED_LOCKABLE MutatorMutex;
class ScopedContentionRecorder;
class Thread;

// LockLevel is used to impose a lock hierarchy [1]: it is invalid to acquire a Mutex at a level
// higher than or equal to that of any lock the thread already holds. The lock hierarchy imposes
// a cycle-free partial ordering on locks, so acquisition orders that could deadlock fail the
// debug checks instead.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
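//
// An illustrative sketch (not part of the API; "self" stands for the calling Thread*): with
// kDebugLocking enabled, acquiring locks in descending level order passes the hierarchy check,
// while acquiring a higher-level lock with a lower-level lock held fails a debug check:
//
//   Mutex high("high", kDefaultMutexLevel);
//   Mutex low("low", kLoggingLock);
//   high.ExclusiveLock(self);   // OK: no locks held yet.
//   low.ExclusiveLock(self);    // OK: kLoggingLock is below kDefaultMutexLevel.
//   low.ExclusiveUnlock(self);
//   high.ExclusiveUnlock(self);
//   low.ExclusiveLock(self);
//   high.ExclusiveLock(self);   // Hierarchy violation: a lock at kLoggingLock is already held.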
enum LockLevel {
  kLoggingLock = 0,
  kMemMapsLock,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kLambdaTableLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kMarkSweepMarkStackLock,
  kTransactionLogLock,
  kJniWeakGlobalsLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kJitDebugInterfaceLock,
  kAllocSpaceLock,
  kBumpPointerSpaceBlockLock,
  kArenaPoolLock,
  kDexFileMethodInlinerLock,
  kDexFileToMethodInlinerMapLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kHostDlOpenHandlesLock,
  kOatFileManagerLock,
  kTracingUniqueMethodsLock,
  kTracingStreamingLock,
  kDeoptimizedMethodsLock,
  kJitCodeCacheLock,
  kClassLoaderClassesLock,
  kDefaultMutexLevel,
  kMarkSweepLargeObjectLock,
  kPinTableLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kMethodVerifiersLock,
  kClassLinkerClassesLock,  // TODO: rename.
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpShutdownLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  kMutatorLock,
  kInstrumentEntrypointsLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);

const bool kDebugLocking = kIsDebugBuild;

// Record lock contention information, dumpable via SIGQUIT.
#if ART_USE_FUTEXES
// To enable lock contention logging, set this to true.
const bool kLogLockContentions = false;
#else
// Keep this false as lock contention logging is supported only with futexes.
const bool kLogLockContentions = false;
#endif
const size_t kContentionLogSize = 4;
const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;

// Base class for all Mutex implementations.
class BaseMutex {
 public:
  const char* GetName() const {
    return name_;
  }

  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }
  virtual bool IsMutatorMutex() const { return false; }

  virtual void Dump(std::ostream& os) const = 0;

  static void DumpAll(std::ostream& os);

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    Atomic<uint64_t> wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};

// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
// State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
// Free      | Exclusive     | error
// Exclusive | Block*        | Free
// * Mutex is not reentrant unless constructed as recursive, so an attempt to ExclusiveLock on a
//   thread that already holds it results in an error. Being non-reentrant simplifies Waiting on
//   ConditionVariables.
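//
// Illustrative usage sketch (not part of the API; "self" is the calling Thread*):
//
//   Mutex lock("example lock");
//   lock.ExclusiveLock(self);    // Blocks until the lock is free, then holds it.
//   ...                          // Exclusive access to the guarded state.
//   lock.ExclusiveUnlock(self);  // Back to Free.
//
// Prefer the scoped MutexLock helper (declared below) so the unlock happens on every path.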
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until mutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void Lock(Thread* self) ACQUIRE() { ExclusiveLock(self); }

  // Returns true if it acquires exclusive access, false otherwise.
  bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true);
  bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void Unlock(Thread* self) RELEASE() { ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) ASSERT_CAPABILITY(!*this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) ASSERT_CAPABILITY(!*this) {
    AssertNotHeldExclusive(self);
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked; it is usually better to use
  // AssertHeld/AssertNotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const Mutex& operator!() const { return *this; }

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};

// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
// Unlike a Mutex, a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
// access to what it guards. One shortcoming relative to a Mutex is that it cannot be used with a
// ConditionVariable. A ReaderWriterMutex can be in one of three states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread,
// - Shared(n) - shared amongst n threads.
//
// The effect of locking and unlocking operations on the state is:
//
// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
// ----------------------------------------------------------------------------
// Free      | Exclusive     | error           | SharedLock(1)    | error
// Exclusive | Block         | Free            | Block            | error
// Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
// * for large values of n the SharedLock may block.
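//
// Illustrative usage sketch (not part of the API; "self" is the calling Thread*):
//
//   ReaderWriterMutex rw("example rw lock");
//   rw.SharedLock(self);       // Any number of threads may hold a share concurrently.
//   ...                        // Read-only access to the guarded state.
//   rw.SharedUnlock(self);
//   rw.ExclusiveLock(self);    // Blocks until there is no sharer or exclusive holder.
//   ...                        // Mutate the guarded state.
//   rw.ExclusiveUnlock(self);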
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
 public:
  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
  ~ReaderWriterMutex();

  virtual bool IsReaderWriterMutex() const { return true; }

  // Block until ReaderWriterMutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void WriterLock(Thread* self) ACQUIRE() { ExclusiveLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void WriterUnlock(Thread* self) RELEASE() { ExclusiveUnlock(self); }

  // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
  // or false if the timeout is reached.
#if HAVE_TIMED_RWLOCK
  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
      EXCLUSIVE_TRYLOCK_FUNCTION(true);
#endif

  // Block until ReaderWriterMutex is shared or free then acquire a share of the access.
  void SharedLock(Thread* self) ACQUIRE_SHARED() ALWAYS_INLINE;
  void ReaderLock(Thread* self) ACQUIRE_SHARED() { SharedLock(self); }

  // Try to acquire a share of the ReaderWriterMutex.
  bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);

  // Release a share of the access.
  void SharedUnlock(Thread* self) RELEASE_SHARED() ALWAYS_INLINE;
  void ReaderUnlock(Thread* self) RELEASE_SHARED() { SharedUnlock(self); }

  // Is the current thread the exclusive holder of the ReaderWriterMutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert the current thread has exclusive access to the ReaderWriterMutex.
  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertWriterHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }

  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
  void AssertNotExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(!*this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotWriterHeld(const Thread* self) ASSERT_CAPABILITY(!*this) {
    AssertNotExclusiveHeld(self);
  }

  // Is the current thread a shared holder of the ReaderWriterMutex.
  bool IsSharedHeld(const Thread* self) const;

  // Assert the current thread has shared access to the ReaderWriterMutex.
  void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      // TODO: we can only assert this well when self != null.
      CHECK(IsSharedHeld(self) || self == nullptr) << *this;
    }
  }
  void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
    AssertSharedHeld(self);
  }

  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
  // mode.
  void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!*this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsSharedHeld(self)) << *this;
    }
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const ReaderWriterMutex& operator!() const { return *this; }

 private:
#if ART_USE_FUTEXES
  // Out-of-inline path for handling contention for a SharedLock.
  void HandleSharedLockContention(Thread* self, int32_t cur_state);

  // -1 implies held exclusively; a positive value means shared held by state_ many owners.
  AtomicInteger state_;
  // Exclusive owner. Modification guarded by this mutex.
  volatile uint64_t exclusive_owner_;
  // Number of contenders waiting for a reader share.
  AtomicInteger num_pending_readers_;
  // Number of contenders waiting to be the writer.
  AtomicInteger num_pending_writers_;
#else
  pthread_rwlock_t rwlock_;
  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
#endif
  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};

// MutatorMutex is a special kind of ReaderWriterMutex created specifically for the
// Locks::mutator_lock_ mutex. The behaviour is identical to the ReaderWriterMutex except that
// thread state changes also play a part in lock ownership. The mutator_lock_ will not be truly
// held by any mutator threads. However, a thread in the kRunnable state is considered to have
// shared ownership of the mutator lock and therefore transitions in and out of the kRunnable
// state have associated implications on lock ownership. Extra methods to handle the state
// transitions have been added to the interface but are only accessible to the methods dealing
// with state transitions. The thread state and flags attributes are used to ensure thread state
// transitions are consistent with the permitted behaviour of the mutex.
//
// *) The most important consequence of this behaviour is that all threads must be in one of the
// suspended states before exclusive ownership of the mutator mutex is sought.
//
std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu);
class SHARED_LOCKABLE MutatorMutex : public ReaderWriterMutex {
 public:
  explicit MutatorMutex(const char* name, LockLevel level = kDefaultMutexLevel)
    : ReaderWriterMutex(name, level) {}
  ~MutatorMutex() {}

  virtual bool IsMutatorMutex() const { return true; }

  // For negative capabilities in clang annotations.
  const MutatorMutex& operator!() const { return *this; }

 private:
  friend class Thread;
  void TransitionFromRunnableToSuspended(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void TransitionFromSuspendedToRunnable(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;

  DISALLOW_COPY_AND_ASSIGN(MutatorMutex);
};

// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
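//
// Illustrative usage sketch (not part of the API; "self" is the calling Thread*; the guard Mutex
// must be held around Wait and around changes to the waited-on condition):
//
//   Mutex lock("cv guard");
//   ConditionVariable cv("example cv", lock);
//   lock.ExclusiveLock(self);
//   while (!condition_holds) {   // Re-check the condition; wake-ups may be spurious.
//     cv.Wait(self);             // Gives up "lock" while waiting, re-acquires before returning.
//   }
//   lock.ExclusiveUnlock(self);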
class ConditionVariable {
 public:
  ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come in to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};

// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction.
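//
// Illustrative usage sketch (not part of the API; "self" is the calling Thread*):
//
//   {
//     MutexLock mu(self, *Locks::thread_list_lock_);
//     ...  // thread_list_lock_ is held here.
//   }      // Released when "mu" goes out of scope.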
class SCOPED_CAPABILITY MutexLock {
 public:
  MutexLock(Thread* self, Mutex& mu) ACQUIRE(mu) : self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~MutexLock() RELEASE() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")
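// An illustrative sketch of what the macro catches: a function-like macro only expands when the
// name is followed by "(", so the correct declaration form is unaffected:
//
//   MutexLock (lock);          // Expands to the static_assert: compile error, as intended.
//   MutexLock mu(self, lock);  // Not a macro invocation; declares the scoped lock normally.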

// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
class SCOPED_CAPABILITY ReaderMutexLock {
 public:
  ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) :
      self_(self), mu_(mu) {
    mu_.SharedLock(self_);
  }

  ~ReaderMutexLock() RELEASE() {
    mu_.SharedUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};
// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock)".
#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")

// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
// construction and releases it upon destruction.
class SCOPED_CAPABILITY WriterMutexLock {
 public:
  WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~WriterMutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
};
// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
// "WriterMutexLock mu(lock)".
#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")

// For StartNoThreadSuspension and EndNoThreadSuspension.
class CAPABILITY("role") Role {
 public:
  void Acquire() ACQUIRE() {}
  void Release() RELEASE() {}
  const Role& operator!() const { return *this; }
};

class Uninterruptible : public Role {
};

// Global mutexes corresponding to the levels above.
class Locks {
 public:
  static void Init();
  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.
  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_;

  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When the
  // GC/Debugger thread wants to suspend all mutator threads, it needs to wait for all mutator
  // threads to pass a barrier. Threads that are already suspended will have their barrier passed
  // by the GC/Debugger thread; threads in the runnable state will pass the barrier when they
  // transition to the suspended state. The GC/Debugger thread will be woken up when all mutator
  // threads are suspended.
  //
  // Thread suspension:
  // mutator thread                                | GC/Debugger
  //   .. running ..                               |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block wait for all threads to pass a barrier
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
  // x: Acquire thread_suspend_count_lock_         |   .. running ..
  // while Thread::suspend_count_ > 0              |   .. running ..
  //   - wait on Thread::resume_cond_              |   .. running ..
  //     (releases thread_suspend_count_lock_)     |   .. running ..
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Change to kRunnable                           |  .. running ..
  //  - this uses a CAS operation to ensure the    |  .. running ..
  //    suspend request flag isn't raised as the   |  .. running ..
  //    state is changed                           |  .. running ..
  //  - if the CAS operation fails then goto x     |  .. running ..
  //  .. running ..                                |  .. running ..
  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (i.e. traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // Guards String initializer register map in interpreter.
  static Mutex* interpreter_string_init_map_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(interpreter_string_init_map_lock_);

  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)

  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guard the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  // Guards opened oat files in OatFileManager.
  static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards dlopen_handles_ in DlOpenOatFile.
  static Mutex* host_dlopen_handles_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(host_dlopen_handles_lock_);

  // Guards reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Guards the maps in mem_map.
  static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Have an exclusive logging thread.
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Allow reader-writer mutual exclusion on the boxed table of lambda objects.
  // TODO: this should be a RW mutex lock, except that ConditionVariables don't work with it.
  static Mutex* lambda_table_lock_ ACQUIRED_AFTER(mutator_lock_);
};

class Roles {
 public:
  // Uninterruptible means that the thread may not become suspended.
  static Uninterruptible uninterruptible_;
};

}  // namespace art

#endif  // ART_RUNTIME_BASE_MUTEX_H_