/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MONITOR_H_
#define ART_RUNTIME_MONITOR_H_

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>  // For strerror, used by operator new below.

#include <atomic>
#include <iosfwd>
#include <list>
#include <vector>

#include "base/allocator.h"
#include "base/atomic.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "lock_word.h"
#include "obj_ptr.h"
#include "read_barrier_option.h"
#include "runtime_callbacks.h"
#include "thread_state.h"

namespace art {

class ArtMethod;
class IsMarkedVisitor;
class LockWord;
template<class T> class Handle;
class StackVisitor;
class Thread;
typedef uint32_t MonitorId;

namespace mirror {
class Object;
}  // namespace mirror

enum class LockReason {
  kForWait,
  kForLock,
};

class Monitor {
 public:
  // The default number of spins that are done before thread suspension is used to forcibly inflate
  // a lock word. See Runtime::max_spins_before_thin_lock_inflation_.
  constexpr static size_t kDefaultMaxSpinsBeforeThinLockInflation = 50;
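  // Conceptually, the constant above bounds a spin-then-inflate loop along these
  // lines (a hedged sketch; TryThinLock and InflateFat are hypothetical names,
  // not part of this API):
  //
  //   for (size_t spins = 0; !TryThinLock(); ++spins) {
  //     if (spins > kDefaultMaxSpinsBeforeThinLockInflation) {
  //       InflateFat();  // Suspend the owner and fall back to a fat monitor.
  //       break;
  //     }
  //   }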

  ~Monitor();

  static void Init(uint32_t lock_profiling_threshold, uint32_t stack_dump_lock_profiling_threshold);

  // Return the thread id of the lock owner or 0 when there is no owner.
  static uint32_t GetLockOwnerThreadId(ObjPtr<mirror::Object> obj)
      NO_THREAD_SAFETY_ANALYSIS;  // TODO: Reading lock owner without holding lock is racy.

  // NO_THREAD_SAFETY_ANALYSIS for mon->Lock.
  static ObjPtr<mirror::Object> MonitorEnter(Thread* thread,
                                             ObjPtr<mirror::Object> obj,
                                             bool trylock)
      EXCLUSIVE_LOCK_FUNCTION(obj.Ptr())
      NO_THREAD_SAFETY_ANALYSIS
      REQUIRES(!Roles::uninterruptible_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // NO_THREAD_SAFETY_ANALYSIS for mon->Unlock.
  static bool MonitorExit(Thread* thread, ObjPtr<mirror::Object> obj)
      NO_THREAD_SAFETY_ANALYSIS
      REQUIRES(!Roles::uninterruptible_)
      REQUIRES_SHARED(Locks::mutator_lock_)
      UNLOCK_FUNCTION(obj.Ptr());
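  // Hedged pairing sketch for MonitorEnter/MonitorExit: blocking can suspend the
  // thread and the GC may move obj, so the returned pointer is the one to use
  // afterwards (illustrative, not a prescribed pattern):
  //
  //   ObjPtr<mirror::Object> locked = Monitor::MonitorEnter(self, obj, /*trylock=*/ false);
  //   ...  // Critical section guarded by the monitor of 'locked'.
  //   Monitor::MonitorExit(self, locked);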

  static void Notify(Thread* self, ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    DoNotify(self, obj, false);
  }
  static void NotifyAll(Thread* self, ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    DoNotify(self, obj, true);
  }

  // Object.wait().  Also called for class init.
  // NO_THREAD_SAFETY_ANALYSIS for mon->Wait.
  static void Wait(Thread* self,
                   ObjPtr<mirror::Object> obj,
                   int64_t ms,
                   int32_t ns,
                   bool interruptShouldThrow, ThreadState why)
      REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;

  static ThreadState FetchState(const Thread* thread,
                                /* out */ ObjPtr<mirror::Object>* monitor_object,
                                /* out */ uint32_t* lock_owner_tid)
      REQUIRES(!Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Used to implement JDWP's ThreadReference.CurrentContendedMonitor.
  static ObjPtr<mirror::Object> GetContendedMonitor(Thread* thread)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Calls 'callback' once for each lock held in the single stack frame represented by
  // the current state of 'stack_visitor'.
  // The abort_on_failure flag allows us to avoid aborting when the runtime state is disordered.
  // This is necessary when we have already aborted but still want to dump as much of the stack
  // as we can.
  static void VisitLocks(StackVisitor* stack_visitor,
                         void (*callback)(ObjPtr<mirror::Object>, void*),
                         void* callback_context,
                         bool abort_on_failure = true)
      REQUIRES_SHARED(Locks::mutator_lock_);
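  // A minimal usage sketch for VisitLocks (the callback name and the stream
  // context are illustrative assumptions, not part of this API):
  //
  //   static void DumpLockedObject(ObjPtr<mirror::Object> obj, void* context) {
  //     std::ostream& os = *reinterpret_cast<std::ostream*>(context);
  //     os << "  - locked " << obj.Ptr() << "\n";
  //   }
  //   ...
  //   Monitor::VisitLocks(&stack_visitor, DumpLockedObject, &os);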

  static bool IsValidLockWord(LockWord lock_word);

  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjPtr<mirror::Object> GetObject() REQUIRES_SHARED(Locks::mutator_lock_);

  void SetObject(ObjPtr<mirror::Object> object);

  // Provides no memory ordering guarantees.
  Thread* GetOwner() const {
    return owner_.load(std::memory_order_relaxed);
  }

  int32_t GetHashCode();

  // Is the monitor currently locked? Debug only, provides no memory ordering guarantees.
  bool IsLocked() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!monitor_lock_);

  bool HasHashCode() const {
    return hash_code_.load(std::memory_order_relaxed) != 0;
  }

  MonitorId GetMonitorId() const {
    return monitor_id_;
  }

  // Inflate the lock on obj. May fail to inflate for spurious reasons, always re-check.
  static void InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word,
                                uint32_t hash_code) REQUIRES_SHARED(Locks::mutator_lock_);
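  // A hedged re-check sketch for InflateThinLocked (assumes self and a
  // Handle<mirror::Object> h_obj are in scope; not a prescribed pattern):
  //
  //   LockWord lw = h_obj->GetLockWord(/*as_volatile=*/ true);
  //   while (lw.GetState() == LockWord::kThinLocked) {
  //     Monitor::InflateThinLocked(self, h_obj, lw, 0);
  //     lw = h_obj->GetLockWord(/*as_volatile=*/ true);  // Inflation may have failed spuriously.
  //   }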

  // Not exclusive because ImageWriter calls this during a Heap::VisitObjects() that
  // does not allow a thread suspension in the middle. TODO: maybe make this exclusive.
  // NO_THREAD_SAFETY_ANALYSIS for monitor->monitor_lock_.
  static bool Deflate(Thread* self, ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;

#ifndef __LP64__
  void* operator new(size_t size) {
    // Align Monitor* as per the monitor ID field size in the lock word.
    void* result;
    int error = posix_memalign(&result, LockWord::kMonitorIdAlignment, size);
    CHECK_EQ(error, 0) << strerror(error);
    return result;
  }

  void operator delete(void* ptr) {
    free(ptr);
  }
#endif

 private:
  Monitor(Thread* self, Thread* owner, ObjPtr<mirror::Object> obj, int32_t hash_code)
      REQUIRES_SHARED(Locks::mutator_lock_);
  Monitor(Thread* self, Thread* owner, ObjPtr<mirror::Object> obj, int32_t hash_code, MonitorId id)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Install the monitor into its object, may fail if another thread installs a different monitor
  // first. Monitor remains in the same logical state as before, i.e. held the same # of times.
  bool Install(Thread* self)
      REQUIRES(!monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Links a thread into a monitor's wait set.  The monitor lock must be held by the caller of this
  // routine.
  void AppendToWaitSet(Thread* thread) REQUIRES(monitor_lock_);

  // Unlinks a thread from a monitor's wait set.  The monitor lock must be held by the caller of
  // this routine.
  void RemoveFromWaitSet(Thread* thread) REQUIRES(monitor_lock_);

  // Release the monitor lock and signal a waiting thread that has been notified and now needs the
  // lock. Assumes the monitor lock is held exactly once, and the owner_ field has been reset to
  // null. Caller may be suspended (Wait) or runnable (MonitorExit).
  void SignalWaiterAndReleaseMonitorLock(Thread* self) RELEASE(monitor_lock_);

  // Changes the shape of a monitor from thin to fat, preserving the internal lock state. The
  // calling thread must own the lock or the owner must be suspended. There's a race with other
  // threads inflating the lock, installing hash codes and spurious failures. The caller should
  // re-read the lock word following the call.
  static void Inflate(Thread* self, Thread* owner, ObjPtr<mirror::Object> obj, int32_t hash_code)
      REQUIRES_SHARED(Locks::mutator_lock_)
      NO_THREAD_SAFETY_ANALYSIS;  // For m->Install(self)

  void LogContentionEvent(Thread* self,
                          uint32_t wait_ms,
                          uint32_t sample_percent,
                          ArtMethod* owner_method,
                          uint32_t owner_dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static void FailedUnlock(ObjPtr<mirror::Object> obj,
                           uint32_t expected_owner_thread_id,
                           uint32_t found_owner_thread_id,
                           Monitor* mon)
      REQUIRES(!Locks::thread_list_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Try to lock without blocking, returns true if we acquired the lock.
  // If spin is true, then we spin for a short period before failing.
  bool TryLock(Thread* self, bool spin = false)
      TRY_ACQUIRE(true, monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template<LockReason reason = LockReason::kForLock>
  void Lock(Thread* self)
      ACQUIRE(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool Unlock(Thread* thread)
      RELEASE(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static void DoNotify(Thread* self, ObjPtr<mirror::Object> obj, bool notify_all)
      REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;  // For mon->Notify.

  void Notify(Thread* self)
      REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void NotifyAll(Thread* self)
      REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static std::string PrettyContentionInfo(const std::string& owner_name,
                                          pid_t owner_tid,
                                          ArtMethod* owners_method,
                                          uint32_t owners_dex_pc,
                                          size_t num_waiters)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Wait on a monitor until timeout, interrupt, or notification.  Used for Object.wait() and
  // (somewhat indirectly) Thread.sleep() and Thread.join().
  //
  // If another thread calls Thread.interrupt(), we throw InterruptedException and return
  // immediately if one of the following is true:
  //  - blocked in wait(), wait(long), or wait(long, int) methods of Object
  //  - blocked in join(), join(long), or join(long, int) methods of Thread
  //  - blocked in sleep(long), or sleep(long, int) methods of Thread
  // Otherwise, we set the "interrupted" flag.
  //
  // Checks to make sure that "ns" is in the range 0-999999 (i.e. fractions of a millisecond) and
  // throws the appropriate exception if it isn't.
  //
  // The spec allows "spurious wakeups", and recommends that all code using Object.wait() do so in
  // a loop.  This appears to derive from concerns about pthread_cond_wait() on multiprocessor
  // systems.  Some commentary on the web casts doubt on whether these can/should occur.
  //
  // Since we're allowed to wake up "early", we clamp extremely long durations to return at the end
  // of the 32-bit time epoch.
  void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow, ThreadState why)
      REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
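  // The wait-in-a-loop recommendation above corresponds to the usual Java-side
  // idiom (illustrative sketch only):
  //
  //   synchronized (lock) {
  //     while (!condition) {
  //       lock.wait();  // May wake early or spuriously; the loop re-checks.
  //     }
  //   }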

  // Translates the provided method and pc into its declaring class' source file and line number.
  static void TranslateLocation(ArtMethod* method, uint32_t pc,
                                const char** source_file,
                                int32_t* line_number)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Provides no memory ordering guarantees.
  uint32_t GetOwnerThreadId() REQUIRES(!monitor_lock_);

  // Set lock_owner_method_ and lock_owner_dex_pc_ corresponding to owner's current stack.
  // owner is either self or suspended.
  void SetLockingMethod(Thread* owner) REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // The same, but without checking for a proxy method. Currently requires owner == self.
  void SetLockingMethodNoProxy(Thread* owner) REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Support for systrace output of monitor operations.
  ALWAYS_INLINE static void AtraceMonitorLock(Thread* self,
                                              ObjPtr<mirror::Object> obj,
                                              bool is_wait)
      REQUIRES_SHARED(Locks::mutator_lock_);
  static void AtraceMonitorLockImpl(Thread* self,
                                    ObjPtr<mirror::Object> obj,
                                    bool is_wait)
      REQUIRES_SHARED(Locks::mutator_lock_);
  ALWAYS_INLINE static void AtraceMonitorUnlock();

  static uint32_t lock_profiling_threshold_;
  static uint32_t stack_dump_lock_profiling_threshold_;
  static bool capture_method_eagerly_;

  // Holding the monitor N times is represented by holding monitor_lock_ N times.
  Mutex monitor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Pretend to unlock monitor lock.
  void FakeUnlockMonitorLock() RELEASE(monitor_lock_) NO_THREAD_SAFETY_ANALYSIS {}

  // Number of threads either waiting on the condition or waiting on a contended
  // monitor acquisition. Prevents deflation.
  std::atomic<size_t> num_waiters_;

  // Which thread currently owns the lock? monitor_lock_ only keeps the tid.
  // Only set while holding monitor_lock_. Non-locking readers only use it to
  // compare to self or for debugging.
  std::atomic<Thread*> owner_;

  // Owner's recursive lock depth is given by monitor_lock_.GetDepth().

  // What object are we part of. This is a weak root. Do not access
  // this directly, use GetObject() to read it so it will be guarded
  // by a read barrier.
  GcRoot<mirror::Object> obj_;

  // Threads currently waiting on this monitor.
  Thread* wait_set_ GUARDED_BY(monitor_lock_);

  // Threads that were waiting on this monitor, but are now contending on it.
  Thread* wake_set_ GUARDED_BY(monitor_lock_);

  // Stored object hash code, generated lazily by GetHashCode.
  AtomicInteger hash_code_;

  // Data structure used to remember the method and dex pc of a recent holder of the
  // lock. Used for tracing and contention reporting. Setting these is expensive, since it
  // involves a partial stack walk. We set them only as follows, to minimize the cost:
  // - If tracing is enabled, they are needed immediately when we first notice contention, so we
  //   set them unconditionally when a monitor is acquired.
  // - If contention reporting is enabled, we use the lock_owner_request_ field to have the
  //   contending thread request them. The current owner then sets them when releasing the monitor,
  //   making them available when the contending thread acquires the monitor.
  // - If both are enabled, we blindly do both. This usually prevents us from switching between
  //   reporting the end and beginning of critical sections for contention logging when tracing is
  //   enabled.  We expect that tracing overhead is normally much higher than for contention
  //   logging, so the added cost should be small. It also minimizes glitches when enabling and
  //   disabling traces.
  // We're tolerant of missing information. E.g. when tracing is initially turned on, we may
  // not have the lock holder information if the holder acquired the lock with tracing off.
  //
  // We make this data unconditionally atomic; for contention logging all accesses are in fact
  // protected by the monitor, but for tracing, reads are not. Writes are always
  // protected by the monitor.
  //
  // The fields are always accessed without memory ordering. We store a checksum, and reread if
  // the checksum doesn't correspond to the values.  This results in values that are correct with
  // very high probability, but not certainty.
  //
  // If we need lock_owner information for a certain thread for contention logging, we store its
  // tid in lock_owner_request_. To satisfy the request, we store lock_owner_tid_,
  // lock_owner_method_, and lock_owner_dex_pc_ and the corresponding checksum while holding the
  // monitor.
  //
  // At all times, either lock_owner_ is zero, the checksum is valid, or a thread is actively
  // in the process of establishing one of those states. Only one thread at a time can be actively
  // establishing such a state, since writes are protected by the monitor.
  std::atomic<Thread*> lock_owner_;  // *lock_owner_ may no longer exist!
  std::atomic<ArtMethod*> lock_owner_method_;
  std::atomic<uint32_t> lock_owner_dex_pc_;
  std::atomic<uintptr_t> lock_owner_sum_;

  // Request lock owner save method and dex_pc. Written asynchronously.
  std::atomic<Thread*> lock_owner_request_;

  // Compute method, dex pc, and tid "checksum".
  uintptr_t LockOwnerInfoChecksum(ArtMethod* m, uint32_t dex_pc, Thread* t);

  // Set owning method, dex pc, and tid. owner_ field is set and points to current thread.
  void SetLockOwnerInfo(ArtMethod* method, uint32_t dex_pc, Thread* t)
      REQUIRES(monitor_lock_);

  // Get owning method and dex pc for the given thread, if available.
  void GetLockOwnerInfo(/*out*/ArtMethod** method, /*out*/uint32_t* dex_pc, Thread* t);
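  // A sketch of the checksum-validated racy read GetLockOwnerInfo performs
  // (illustrative; the actual logic lives in monitor.cc):
  //
  //   ArtMethod* m;
  //   uint32_t pc;
  //   Thread* t;
  //   do {
  //     t = lock_owner_.load(std::memory_order_relaxed);
  //     m = lock_owner_method_.load(std::memory_order_relaxed);
  //     pc = lock_owner_dex_pc_.load(std::memory_order_relaxed);
  //   } while (t != nullptr &&
  //            lock_owner_sum_.load(std::memory_order_relaxed) !=
  //                LockOwnerInfoChecksum(m, pc, t));  // Torn read; retry.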

  // Do the same, while holding the monitor. There are no concurrent updates.
  void GetLockOwnerInfoLocked(/*out*/ArtMethod** method, /*out*/uint32_t* dex_pc,
                              uint32_t thread_id)
      REQUIRES(monitor_lock_);

  // We never clear lock_owner method and dex pc. Since it often reflects
  // ownership when we last detected contention, it may be inconsistent with owner_
  // and not 100% reliable. For lock contention monitoring, in the absence of tracing,
  // there is a small risk that the current owner may finish before noticing the request,
  // or the information will be overwritten by another intervening request and monitor
  // release, so it's also not 100% reliable. But if we report information at all, it
  // should generally (modulo accidental checksum matches) pertain to an acquisition of the
  // right monitor by the right thread, so it's extremely unlikely to be seriously misleading.
  // Since we track threads by a pointer to the Thread structure, there is a small chance we may
  // confuse threads allocated at the same exact address, if a contending thread dies before
  // we inquire about it.

  // Check for and act on a pending lock_owner_request_
  void CheckLockOwnerRequest(Thread* self)
      REQUIRES(monitor_lock_) REQUIRES_SHARED(Locks::mutator_lock_);

  // The denser encoded version of this monitor as stored in the lock word.
  MonitorId monitor_id_;

#ifdef __LP64__
  // Free list for monitor pool.
  Monitor* next_free_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
#endif

  friend class MonitorInfo;
  friend class MonitorList;
  friend class MonitorPool;
  friend class mirror::Object;
  DISALLOW_COPY_AND_ASSIGN(Monitor);
};

class MonitorList {
 public:
  MonitorList();
  ~MonitorList();

  void Add(Monitor* m) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!monitor_list_lock_);

  void SweepMonitorList(IsMarkedVisitor* visitor)
      REQUIRES(!monitor_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
  void DisallowNewMonitors() REQUIRES(!monitor_list_lock_);
  void AllowNewMonitors() REQUIRES(!monitor_list_lock_);
  void BroadcastForNewMonitors() REQUIRES(!monitor_list_lock_);
  // Returns how many monitors were deflated.
  size_t DeflateMonitors() REQUIRES(!monitor_list_lock_) REQUIRES(Locks::mutator_lock_);
  size_t Size() REQUIRES(!monitor_list_lock_);

  typedef std::list<Monitor*, TrackingAllocator<Monitor*, kAllocatorTagMonitorList>> Monitors;

 private:
  // During sweeping we may free an object and on a separate thread have an object created using
  // the newly freed memory. That object may then have its lock-word inflated and a monitor created.
  // If we allow new monitor registration during sweeping this monitor may be incorrectly freed as
  // the object wasn't marked when sweeping began.
  bool allow_new_monitors_ GUARDED_BY(monitor_list_lock_);
  Mutex monitor_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  ConditionVariable monitor_add_condition_ GUARDED_BY(monitor_list_lock_);
  Monitors list_ GUARDED_BY(monitor_list_lock_);

  friend class Monitor;
  DISALLOW_COPY_AND_ASSIGN(MonitorList);
};

// Collects information about the current state of an object's monitor.
// This is very unsafe, and must only be called when all threads are suspended.
// For use only by the JDWP implementation.
class MonitorInfo {
 public:
  MonitorInfo() : owner_(nullptr), entry_count_(0) {}
  MonitorInfo(const MonitorInfo&) = default;
  MonitorInfo& operator=(const MonitorInfo&) = default;
  explicit MonitorInfo(ObjPtr<mirror::Object> o) REQUIRES(Locks::mutator_lock_);

  Thread* owner_;
  size_t entry_count_;
  std::vector<Thread*> waiters_;
};

}  // namespace art

#endif  // ART_RUNTIME_MONITOR_H_