/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MONITOR_H_
#define ART_RUNTIME_MONITOR_H_

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

#include <iosfwd>
#include <list>
#include <vector>

#include "atomic.h"
#include "base/allocator.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "lock_word.h"
#include "object_callbacks.h"
#include "read_barrier_option.h"
#include "thread_state.h"

namespace art {

class ArtMethod;
class LockWord;
template<class T> class Handle;
class StackVisitor;
class Thread;
typedef uint32_t MonitorId;

namespace mirror {
  class Object;
}  // namespace mirror

class Monitor {
 public:
  // The default number of spins that are done before thread suspension is used to forcibly inflate
  // a lock word. See Runtime::max_spins_before_thin_lock_inflation_.
  constexpr static size_t kDefaultMaxSpinsBeforeThinLockInflation = 50;

  ~Monitor();

  static bool IsSensitiveThread();
  static void Init(uint32_t lock_profiling_threshold, bool (*is_sensitive_thread_hook)());

  // Return the thread id of the lock owner or 0 when there is no owner.
  static uint32_t GetLockOwnerThreadId(mirror::Object* obj)
      NO_THREAD_SAFETY_ANALYSIS;  // TODO: Reading lock owner without holding lock is racy.

  static mirror::Object* MonitorEnter(Thread* thread, mirror::Object* obj)
      EXCLUSIVE_LOCK_FUNCTION(obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static bool MonitorExit(Thread* thread, mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      UNLOCK_FUNCTION(obj);
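
  // A minimal sketch of how the enter/exit pair above is meant to be used; the local names
  // below are hypothetical and the comments reflect assumptions, not guarantees:
  //
  //   mirror::Object* locked = Monitor::MonitorEnter(self, obj);  // may block while contended
  //   ...                                                         // critical section on |locked|
  //   Monitor::MonitorExit(self, locked);                         // returns false if unlocking fails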

  static void Notify(Thread* self, mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DoNotify(self, obj, false);
  }
  static void NotifyAll(Thread* self, mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DoNotify(self, obj, true);
  }

  // Object.wait().  Also called for class init.
  static void Wait(Thread* self, mirror::Object* obj, int64_t ms, int32_t ns,
                   bool interruptShouldThrow, ThreadState why)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
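
  // The usual wait/notify shape using the statics above, written as a sketch; |self|, |obj|,
  // |other_self| and the use of kWaiting for an untimed wait are illustrative assumptions:
  //
  //   Monitor::MonitorEnter(self, obj);
  //   Monitor::Wait(self, obj, 0, 0, true, kWaiting);  // releases the lock while waiting
  //   Monitor::MonitorExit(self, obj);
  //
  //   // Meanwhile, some other thread holding the same lock wakes the waiter:
  //   Monitor::Notify(other_self, obj);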

  static void DescribeWait(std::ostream& os, const Thread* thread)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Used to implement JDWP's ThreadReference.CurrentContendedMonitor.
  static mirror::Object* GetContendedMonitor(Thread* thread)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Calls 'callback' once for each lock held in the single stack frame represented by
  // the current state of 'stack_visitor'.
  // The abort_on_failure flag lets us avoid aborting when the runtime is in a disorderly state.
  // This is necessary when we have already aborted but still want to dump as much of the stack
  // as we can.
  static void VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
                         void* callback_context, bool abort_on_failure = true)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
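
  // A sketch of a callback with the signature VisitLocks expects; DumpLockedObject, the
  // std::ostream context and the local stack_visitor are illustrative names only:
  //
  //   static void DumpLockedObject(mirror::Object* o, void* context) {
  //     std::ostream& os = *reinterpret_cast<std::ostream*>(context);
  //     os << "  - locked " << o << "\n";
  //   }
  //   ...
  //   Monitor::VisitLocks(&stack_visitor, DumpLockedObject, &os);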

  static bool IsValidLockWord(LockWord lock_word);

  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  mirror::Object* GetObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return obj_.Read<kReadBarrierOption>();
  }

  void SetObject(mirror::Object* object);

  Thread* GetOwner() const NO_THREAD_SAFETY_ANALYSIS {
    return owner_;
  }

  int32_t GetHashCode();

  bool IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool HasHashCode() const {
    return hash_code_.LoadRelaxed() != 0;
  }

  MonitorId GetMonitorId() const {
    return monitor_id_;
  }

  // Inflate the lock on obj. May fail to inflate for spurious reasons; callers should always
  // re-check the lock word after the call.
  static void InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word,
                                uint32_t hash_code) NO_THREAD_SAFETY_ANALYSIS;
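
  // Since inflation can fail spuriously, callers typically loop and re-read the lock word.
  // A rough sketch, assuming mirror::Object::GetLockWord and the LockWord state accessors
  // (neither is declared in this header) and an h_obj of type Handle<mirror::Object>:
  //
  //   LockWord lw = h_obj->GetLockWord(true);
  //   while (lw.GetState() == LockWord::kThinLocked) {
  //     Monitor::InflateThinLocked(self, h_obj, lw, 0);
  //     lw = h_obj->GetLockWord(true);  // inflation may have failed spuriously; re-check
  //   }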

  static bool Deflate(Thread* self, mirror::Object* obj)
      // Not exclusive because ImageWriter calls this during a Heap::VisitObjects() that
      // does not allow a thread suspension in the middle. TODO: maybe make this exclusive.
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

#ifndef __LP64__
  void* operator new(size_t size) {
    // Align Monitor* as per the monitor ID field size in the lock word.
    void* result;
    int error = posix_memalign(&result, LockWord::kMonitorIdAlignment, size);
    CHECK_EQ(error, 0) << strerror(error);
    return result;
  }

  void operator delete(void* ptr) {
    free(ptr);
  }
#endif

 private:
  explicit Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  explicit Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code,
                   MonitorId id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Install the monitor into its object; may fail if another thread installs a different monitor
  // first.
  bool Install(Thread* self)
      LOCKS_EXCLUDED(monitor_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Links a thread into a monitor's wait set.  The monitor lock must be held by the caller of this
  // routine.
  void AppendToWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_);

  // Unlinks a thread from a monitor's wait set.  The monitor lock must be held by the caller of
  // this routine.
  void RemoveFromWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_);

  // Changes the shape of a monitor from thin to fat, preserving the internal lock state. The
  // calling thread must own the lock or the owner must be suspended. There's a race with other
  // threads inflating the lock, installing hash codes and spurious failures. The caller should
  // re-read the lock word following the call.
  static void Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent,
                          const char* owner_filename, uint32_t owner_line_number)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void FailedUnlock(mirror::Object* obj, Thread* expected_owner, Thread* found_owner,
                           Monitor* mon)
      LOCKS_EXCLUDED(Locks::thread_list_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void Lock(Thread* self)
      LOCKS_EXCLUDED(monitor_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool Unlock(Thread* thread)
      LOCKS_EXCLUDED(monitor_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void DoNotify(Thread* self, mirror::Object* obj, bool notify_all)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void Notify(Thread* self)
      LOCKS_EXCLUDED(monitor_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void NotifyAll(Thread* self)
      LOCKS_EXCLUDED(monitor_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Wait on a monitor until timeout, interrupt, or notification.  Used for Object.wait() and
  // (somewhat indirectly) Thread.sleep() and Thread.join().
  //
  // If another thread calls Thread.interrupt(), we throw InterruptedException and return
  // immediately if one of the following is true:
  //  - blocked in wait(), wait(long), or wait(long, int) methods of Object
  //  - blocked in join(), join(long), or join(long, int) methods of Thread
  //  - blocked in sleep(long), or sleep(long, int) methods of Thread
  // Otherwise, we set the "interrupted" flag.
  //
  // Checks to make sure that "nsec" is in the range 0-999999 (i.e. fractions of a millisecond) and
  // throws the appropriate exception if it isn't.
  //
  // The spec allows "spurious wakeups", and recommends that all code using Object.wait() do so in
  // a loop.  This appears to derive from concerns about pthread_cond_wait() on multiprocessor
  // systems.  Some commentary on the web casts doubt on whether these can/should occur.
  //
  // Since we're allowed to wake up "early", we clamp extremely long durations to return at the end
  // of the 32-bit time epoch.
  void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow, ThreadState why)
      LOCKS_EXCLUDED(monitor_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Translates the provided method and pc into its declaring class' source file and line number.
  void TranslateLocation(ArtMethod* method, uint32_t pc,
                         const char** source_file, uint32_t* line_number) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  uint32_t GetOwnerThreadId();

  static bool (*is_sensitive_thread_hook_)();
  static uint32_t lock_profiling_threshold_;

  Mutex monitor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  ConditionVariable monitor_contenders_ GUARDED_BY(monitor_lock_);

  // Number of people waiting on the condition.
  size_t num_waiters_ GUARDED_BY(monitor_lock_);

  // Which thread currently owns the lock?
  Thread* volatile owner_ GUARDED_BY(monitor_lock_);

  // Owner's recursive lock depth.
  int lock_count_ GUARDED_BY(monitor_lock_);

  // Which object are we part of? This is a weak root. Do not access
  // this directly; use GetObject() to read it so it will be guarded
  // by a read barrier.
  GcRoot<mirror::Object> obj_;

  // Threads currently waiting on this monitor.
  Thread* wait_set_ GUARDED_BY(monitor_lock_);

  // Stored object hash code, generated lazily by GetHashCode.
  AtomicInteger hash_code_;

  // Method and dex pc where the lock owner acquired the lock, used when lock
  // sampling is enabled. locking_method_ may be null if the lock is currently
  // unlocked, or if the lock is acquired by the system when the stack is empty.
  ArtMethod* locking_method_ GUARDED_BY(monitor_lock_);
  uint32_t locking_dex_pc_ GUARDED_BY(monitor_lock_);

  // The denser encoded version of this monitor as stored in the lock word.
  MonitorId monitor_id_;

#ifdef __LP64__
  // Free list for monitor pool.
  Monitor* next_free_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
#endif

  friend class MonitorInfo;
  friend class MonitorList;
  friend class MonitorPool;
  friend class mirror::Object;
  DISALLOW_COPY_AND_ASSIGN(Monitor);
};

class MonitorList {
 public:
  MonitorList();
  ~MonitorList();

  void Add(Monitor* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SweepMonitorList(IsMarkedCallback* callback, void* arg)
      LOCKS_EXCLUDED(monitor_list_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DisallowNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_);
  void AllowNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_);
  void EnsureNewMonitorsDisallowed() LOCKS_EXCLUDED(monitor_list_lock_);
  // Returns how many monitors were deflated.
  size_t DeflateMonitors() LOCKS_EXCLUDED(monitor_list_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  typedef std::list<Monitor*, TrackingAllocator<Monitor*, kAllocatorTagMonitorList>> Monitors;

 private:
  // During sweeping we may free an object and on a separate thread have an object created using
  // the newly freed memory. That object may then have its lock-word inflated and a monitor created.
  // If we allow new monitor registration during sweeping this monitor may be incorrectly freed as
  // the object wasn't marked when sweeping began.
  bool allow_new_monitors_ GUARDED_BY(monitor_list_lock_);
  Mutex monitor_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  ConditionVariable monitor_add_condition_ GUARDED_BY(monitor_list_lock_);
  Monitors list_ GUARDED_BY(monitor_list_lock_);

  friend class Monitor;
  DISALLOW_COPY_AND_ASSIGN(MonitorList);
};

// Collects information about the current state of an object's monitor.
// This is very unsafe, and must only be called when all threads are suspended.
// For use only by the JDWP implementation.
class MonitorInfo {
 public:
  explicit MonitorInfo(mirror::Object* o) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  Thread* owner_;
  size_t entry_count_;
  std::vector<Thread*> waiters_;

 private:
  DISALLOW_COPY_AND_ASSIGN(MonitorInfo);
};

}  // namespace art

#endif  // ART_RUNTIME_MONITOR_H_