1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_RUNTIME_MONITOR_H_
18 #define ART_RUNTIME_MONITOR_H_
19 
20 #include <pthread.h>
21 #include <stdint.h>
22 
23 #include <iosfwd>
24 #include <list>
25 #include <vector>
26 
27 #include "atomic.h"
28 #include "base/allocator.h"
29 #include "base/mutex.h"
30 #include "gc_root.h"
31 #include "object_callbacks.h"
32 #include "read_barrier_option.h"
33 #include "thread_state.h"
34 
namespace art {

// Forward declarations to keep this header light; full definitions live in their own headers.
class LockWord;
template<class T> class Handle;
class Thread;
class StackVisitor;
// Dense identifier for a monitor as encoded in a fat lock word (see monitor_id_ below).
typedef uint32_t MonitorId;

namespace mirror {
  class ArtMethod;
  class Object;
}  // namespace mirror
47 
48 class Monitor {
49  public:
50   // The default number of spins that are done before thread suspension is used to forcibly inflate
51   // a lock word. See Runtime::max_spins_before_thin_lock_inflation_.
52   constexpr static size_t kDefaultMaxSpinsBeforeThinLockInflation = 50;
53 
54   ~Monitor();
55 
56   static bool IsSensitiveThread();
57   static void Init(uint32_t lock_profiling_threshold, bool (*is_sensitive_thread_hook)());
58 
59   // Return the thread id of the lock owner or 0 when there is no owner.
60   static uint32_t GetLockOwnerThreadId(mirror::Object* obj)
61       NO_THREAD_SAFETY_ANALYSIS;  // TODO: Reading lock owner without holding lock is racy.
62 
63   static mirror::Object* MonitorEnter(Thread* thread, mirror::Object* obj)
64       EXCLUSIVE_LOCK_FUNCTION(obj)
65       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
66   static bool MonitorExit(Thread* thread, mirror::Object* obj)
67       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
68       UNLOCK_FUNCTION(obj);
69 
Notify(Thread * self,mirror::Object * obj)70   static void Notify(Thread* self, mirror::Object* obj)
71       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
72     DoNotify(self, obj, false);
73   }
NotifyAll(Thread * self,mirror::Object * obj)74   static void NotifyAll(Thread* self, mirror::Object* obj)
75       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
76     DoNotify(self, obj, true);
77   }
78   static void Wait(Thread* self, mirror::Object* obj, int64_t ms, int32_t ns,
79                    bool interruptShouldThrow, ThreadState why)
80       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
81 
82   static void DescribeWait(std::ostream& os, const Thread* thread)
83       LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
84       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
85 
86   // Used to implement JDWP's ThreadReference.CurrentContendedMonitor.
87   static mirror::Object* GetContendedMonitor(Thread* thread)
88       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
89 
90   // Calls 'callback' once for each lock held in the single stack frame represented by
91   // the current state of 'stack_visitor'.
92   // The abort_on_failure flag allows to not die when the state of the runtime is unorderly. This
93   // is necessary when we have already aborted but want to dump the stack as much as we can.
94   static void VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
95                          void* callback_context, bool abort_on_failure = true)
96       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
97 
98   static bool IsValidLockWord(LockWord lock_word);
99 
100   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
GetObject()101   mirror::Object* GetObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
102     return obj_.Read<kReadBarrierOption>();
103   }
104 
105   void SetObject(mirror::Object* object);
106 
GetOwner()107   Thread* GetOwner() const NO_THREAD_SAFETY_ANALYSIS {
108     return owner_;
109   }
110 
111   int32_t GetHashCode();
112 
113   bool IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
114 
HasHashCode()115   bool HasHashCode() const {
116     return hash_code_.LoadRelaxed() != 0;
117   }
118 
GetMonitorId()119   MonitorId GetMonitorId() const {
120     return monitor_id_;
121   }
122 
123   // Inflate the lock on obj. May fail to inflate for spurious reasons, always re-check.
124   static void InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word,
125                                 uint32_t hash_code) NO_THREAD_SAFETY_ANALYSIS;
126 
127   static bool Deflate(Thread* self, mirror::Object* obj)
128       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
129 
130  private:
131   explicit Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
132         SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
133   explicit Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code,
134                    MonitorId id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
135 
136   // Install the monitor into its object, may fail if another thread installs a different monitor
137   // first.
138   bool Install(Thread* self)
139       LOCKS_EXCLUDED(monitor_lock_)
140       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
141 
142   void AppendToWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_);
143   void RemoveFromWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_);
144 
145   /*
146    * Changes the shape of a monitor from thin to fat, preserving the internal lock state. The
147    * calling thread must own the lock or the owner must be suspended. There's a race with other
148    * threads inflating the lock, installing hash codes and spurious failures. The caller should
149    * re-read the lock word following the call.
150    */
151   static void Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
152       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
153 
154   void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent,
155                           const char* owner_filename, uint32_t owner_line_number)
156       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
157 
158   static void FailedUnlock(mirror::Object* obj, Thread* expected_owner, Thread* found_owner, Monitor* mon)
159       LOCKS_EXCLUDED(Locks::thread_list_lock_)
160       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
161 
162   void Lock(Thread* self)
163       LOCKS_EXCLUDED(monitor_lock_)
164       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
165   bool Unlock(Thread* thread)
166       LOCKS_EXCLUDED(monitor_lock_)
167       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
168 
169   static void DoNotify(Thread* self, mirror::Object* obj, bool notify_all)
170       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
171 
172   void Notify(Thread* self)
173       LOCKS_EXCLUDED(monitor_lock_)
174       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
175 
176   void NotifyAll(Thread* self)
177       LOCKS_EXCLUDED(monitor_lock_)
178       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
179 
180 
181   void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow, ThreadState why)
182       LOCKS_EXCLUDED(monitor_lock_)
183       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
184 
185   // Translates the provided method and pc into its declaring class' source file and line number.
186   void TranslateLocation(mirror::ArtMethod* method, uint32_t pc,
187                          const char** source_file, uint32_t* line_number) const
188       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
189 
190   uint32_t GetOwnerThreadId();
191 
192   static bool (*is_sensitive_thread_hook_)();
193   static uint32_t lock_profiling_threshold_;
194 
195   Mutex monitor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
196 
197   ConditionVariable monitor_contenders_ GUARDED_BY(monitor_lock_);
198 
199   // Number of people waiting on the condition.
200   size_t num_waiters_ GUARDED_BY(monitor_lock_);
201 
202   // Which thread currently owns the lock?
203   Thread* volatile owner_ GUARDED_BY(monitor_lock_);
204 
205   // Owner's recursive lock depth.
206   int lock_count_ GUARDED_BY(monitor_lock_);
207 
208   // What object are we part of. This is a weak root. Do not access
209   // this directly, use GetObject() to read it so it will be guarded
210   // by a read barrier.
211   GcRoot<mirror::Object> obj_;
212 
213   // Threads currently waiting on this monitor.
214   Thread* wait_set_ GUARDED_BY(monitor_lock_);
215 
216   // Stored object hash code, generated lazily by GetHashCode.
217   AtomicInteger hash_code_;
218 
219   // Method and dex pc where the lock owner acquired the lock, used when lock
220   // sampling is enabled. locking_method_ may be null if the lock is currently
221   // unlocked, or if the lock is acquired by the system when the stack is empty.
222   mirror::ArtMethod* locking_method_ GUARDED_BY(monitor_lock_);
223   uint32_t locking_dex_pc_ GUARDED_BY(monitor_lock_);
224 
225   // The denser encoded version of this monitor as stored in the lock word.
226   MonitorId monitor_id_;
227 
228 #ifdef __LP64__
229   // Free list for monitor pool.
230   Monitor* next_free_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
231 #endif
232 
233   friend class MonitorInfo;
234   friend class MonitorList;
235   friend class MonitorPool;
236   friend class mirror::Object;
237   DISALLOW_COPY_AND_ASSIGN(Monitor);
238 };
239 
240 class MonitorList {
241  public:
242   MonitorList();
243   ~MonitorList();
244 
245   void Add(Monitor* m);
246 
247   void SweepMonitorList(IsMarkedCallback* callback, void* arg)
248       LOCKS_EXCLUDED(monitor_list_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
249   void DisallowNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_);
250   void AllowNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_);
251   // Returns how many monitors were deflated.
252   size_t DeflateMonitors() LOCKS_EXCLUDED(monitor_list_lock_)
253       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
254 
255   typedef std::list<Monitor*, TrackingAllocator<Monitor*, kAllocatorTagMonitorList>> Monitors;
256 
257  private:
258   // During sweeping we may free an object and on a separate thread have an object created using
259   // the newly freed memory. That object may then have its lock-word inflated and a monitor created.
260   // If we allow new monitor registration during sweeping this monitor may be incorrectly freed as
261   // the object wasn't marked when sweeping began.
262   bool allow_new_monitors_ GUARDED_BY(monitor_list_lock_);
263   Mutex monitor_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
264   ConditionVariable monitor_add_condition_ GUARDED_BY(monitor_list_lock_);
265   Monitors list_ GUARDED_BY(monitor_list_lock_);
266 
267   friend class Monitor;
268   DISALLOW_COPY_AND_ASSIGN(MonitorList);
269 };
270 
// Collects information about the current state of an object's monitor.
// This is very unsafe, and must only be called when all threads are suspended.
// For use only by the JDWP implementation.
class MonitorInfo {
 public:
  // Snapshots the monitor state of 'o' at construction time.
  explicit MonitorInfo(mirror::Object* o) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Owning thread — presumably null when the monitor is unlocked; confirm in the constructor.
  Thread* owner_;
  // Owner's lock recursion depth.
  size_t entry_count_;
  // Threads waiting on the monitor at snapshot time.
  std::vector<Thread*> waiters_;

 private:
  DISALLOW_COPY_AND_ASSIGN(MonitorInfo);
};
285 
286 }  // namespace art
287 
288 #endif  // ART_RUNTIME_MONITOR_H_
289