//===-- tsan_mutex.cc -----------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_libc.h"
#include "tsan_mutex.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"

namespace __tsan {

// Simple reader-writer spin-mutex. Optimized for the not-so-contended case.
// Readers have preference and can potentially starve writers.

// The table specifies which mutexes can be locked while holding which other
// mutexes. E.g. if the row for MutexTypeThreads contains MutexTypeReport,
// then the Report mutex can be locked while under the Threads mutex.
// Leaf mutexes can be locked under any other mutex.
// Recursive locking is not supported.
// See the illustrative sketch after the table below.
#if SANITIZER_DEBUG && !SANITIZER_GO
const MutexType MutexTypeLeaf = (MutexType)-1;
static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
  /*0  MutexTypeInvalid*/     {},
  /*1  MutexTypeTrace*/       {MutexTypeLeaf},
  /*2  MutexTypeThreads*/     {MutexTypeReport},
  /*3  MutexTypeReport*/      {MutexTypeSyncVar,
                               MutexTypeMBlock, MutexTypeJavaMBlock},
  /*4  MutexTypeSyncVar*/     {MutexTypeDDetector},
  /*5  MutexTypeSyncTab*/     {},  // unused
  /*6  MutexTypeSlab*/        {MutexTypeLeaf},
  /*7  MutexTypeAnnotations*/ {},
  /*8  MutexTypeAtExit*/      {MutexTypeSyncVar},
  /*9  MutexTypeMBlock*/      {MutexTypeSyncVar},
  /*10 MutexTypeJavaMBlock*/  {MutexTypeSyncVar},
  /*11 MutexTypeDDetector*/   {},
};
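
// Illustrative sketch (not part of the runtime): how the table above is meant
// to be read, assuming the mutex and stat identifiers declared in
// tsan_mutex.h / tsan_stat.h. The row for MutexTypeThreads lists
// MutexTypeReport, so the ordering below is accepted by the debug deadlock
// detector, while the reverse ordering would fail a CHECK:
//
//   Mutex threads_mtx(MutexTypeThreads, StatMtxThreads);  // names illustrative
//   Mutex report_mtx(MutexTypeReport, StatMtxReport);
//   threads_mtx.Lock();
//   report_mtx.Lock();    // ok: Report is listed in the Threads row
//   report_mtx.Unlock();
//   threads_mtx.Unlock();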

static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
#endif

void InitializeMutex() {
#if SANITIZER_DEBUG && !SANITIZER_GO
  // Build the "can lock" adjacency matrix.
  // If [i][j]==true, then one can lock mutex j while under mutex i.
  const int N = MutexTypeCount;
  int cnt[N] = {};
  bool leaf[N] = {};
  for (int i = 1; i < N; i++) {
    for (int j = 0; j < N; j++) {
      MutexType z = CanLockTab[i][j];
      if (z == MutexTypeInvalid)
        continue;
      if (z == MutexTypeLeaf) {
        CHECK(!leaf[i]);
        leaf[i] = true;
        continue;
      }
      CHECK(!CanLockAdj[i][(int)z]);
      CanLockAdj[i][(int)z] = true;
      cnt[i]++;
    }
  }
  for (int i = 0; i < N; i++) {
    CHECK(!leaf[i] || cnt[i] == 0);
  }
  // Add leaf mutexes.
  for (int i = 0; i < N; i++) {
    if (!leaf[i])
      continue;
    for (int j = 0; j < N; j++) {
      if (i == j || leaf[j] || j == MutexTypeInvalid)
        continue;
      CHECK(!CanLockAdj[j][i]);
      CanLockAdj[j][i] = true;
    }
  }
  // Build the transitive closure.
  bool CanLockAdj2[MutexTypeCount][MutexTypeCount];
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      CanLockAdj2[i][j] = CanLockAdj[i][j];
    }
  }
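  // Warshall-style closure over the adjacency matrix. The closure is only
  // consulted by the acyclicity check at the end of this function;
  // InternalDeadlockDetector::Lock checks the direct CanLockAdj edges.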
  for (int k = 0; k < N; k++) {
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < N; j++) {
        if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) {
          CanLockAdj2[i][j] = true;
        }
      }
    }
  }
#if 0
  Printf("Can lock graph:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj[i][j]);
    }
    Printf("\n");
  }
  Printf("Can lock graph closure:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj2[i][j]);
    }
    Printf("\n");
  }
#endif
  // Verify that the graph is acyclic.
  for (int i = 0; i < N; i++) {
    if (CanLockAdj2[i][i]) {
      Printf("Mutex %d participates in a cycle\n", i);
      Die();
    }
  }
#endif
}

InternalDeadlockDetector::InternalDeadlockDetector() {
  // Rely on zero initialization because some mutexes can be locked before ctor.
}

#if SANITIZER_DEBUG && !SANITIZER_GO
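// Records acquisition of mutex type t and checks lock ordering: among the
// mutexes currently held by this thread, the most recently acquired one must
// have a direct CanLockAdj edge to t; otherwise a potential internal deadlock
// is reported and the process dies.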
void InternalDeadlockDetector::Lock(MutexType t) {
  // Printf("LOCK %d @%zu\n", t, seq_ + 1);
  CHECK_GT(t, MutexTypeInvalid);
  CHECK_LT(t, MutexTypeCount);
  u64 max_seq = 0;
  u64 max_idx = MutexTypeInvalid;
  for (int i = 0; i != MutexTypeCount; i++) {
    if (locked_[i] == 0)
      continue;
    CHECK_NE(locked_[i], max_seq);
    if (max_seq < locked_[i]) {
      max_seq = locked_[i];
      max_idx = i;
    }
  }
  locked_[t] = ++seq_;
  if (max_idx == MutexTypeInvalid)
    return;
  // Printf("  last %d @%zu\n", max_idx, max_seq);
  if (!CanLockAdj[max_idx][t]) {
    Printf("ThreadSanitizer: internal deadlock detected\n");
    Printf("ThreadSanitizer: can't lock %d while under %zu\n",
               t, (uptr)max_idx);
    CHECK(0);
  }
}

void InternalDeadlockDetector::Unlock(MutexType t) {
  // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
  CHECK(locked_[t]);
  locked_[t] = 0;
}

void InternalDeadlockDetector::CheckNoLocks() {
  for (int i = 0; i != MutexTypeCount; i++) {
    CHECK_EQ(locked_[i], 0);
  }
}
#endif

void CheckNoLocks(ThreadState *thr) {
#if SANITIZER_DEBUG && !SANITIZER_GO
  thr->internal_deadlock_detector.CheckNoLocks();
#endif
}

const uptr kUnlocked = 0;
const uptr kWriteLock = 1;
const uptr kReadLock = 2;
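// The mutex state is a single word: bit 0 is the writer lock, and each reader
// adds kReadLock (2), so the reader count lives in the remaining bits.
// kUnlocked means no writer and no readers.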

class Backoff {
 public:
  Backoff()
    : iter_() {
  }

  bool Do() {
    if (iter_++ < kActiveSpinIters)
      proc_yield(kActiveSpinCnt);
    else
      internal_sched_yield();
    return true;
  }

  u64 Contention() const {
    u64 active = iter_ % kActiveSpinIters;
    u64 passive = iter_ - active;
    return active + 10 * passive;
  }

 private:
  int iter_;
  static const int kActiveSpinIters = 10;
  static const int kActiveSpinCnt = 20;
};
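
// Sketch of the intended spin pattern (mirrors Lock()/ReadLock() below):
// a few rounds of active spinning via proc_yield(), then fall back to
// internal_sched_yield(); Contention() weights passive iterations 10x for
// the contention statistics.
//
//   for (Backoff backoff; backoff.Do();) {
//     if (try_to_acquire())   // hypothetical acquisition attempt
//       break;
//   }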

Mutex::Mutex(MutexType type, StatType stat_type) {
  CHECK_GT(type, MutexTypeInvalid);
  CHECK_LT(type, MutexTypeCount);
#if SANITIZER_DEBUG
  type_ = type;
#endif
#if TSAN_COLLECT_STATS
  stat_type_ = stat_type;
#endif
  atomic_store(&state_, kUnlocked, memory_order_relaxed);
}

Mutex::~Mutex() {
  CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}

void Mutex::Lock() {
#if SANITIZER_DEBUG && !SANITIZER_GO
  cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
  uptr cmp = kUnlocked;
  if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                     memory_order_acquire))
    return;
  for (Backoff backoff; backoff.Do();) {
    if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) {
      cmp = kUnlocked;
      if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire)) {
#if TSAN_COLLECT_STATS && !SANITIZER_GO
        StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
        return;
      }
    }
  }
}

void Mutex::Unlock() {
  uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
  (void)prev;
  DCHECK_NE(prev & kWriteLock, 0);
#if SANITIZER_DEBUG && !SANITIZER_GO
  cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}

void Mutex::ReadLock() {
#if SANITIZER_DEBUG && !SANITIZER_GO
  cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
  uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
  if ((prev & kWriteLock) == 0)
    return;
  for (Backoff backoff; backoff.Do();) {
    prev = atomic_load(&state_, memory_order_acquire);
    if ((prev & kWriteLock) == 0) {
#if TSAN_COLLECT_STATS && !SANITIZER_GO
      StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
      return;
    }
  }
}

void Mutex::ReadUnlock() {
  uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
  (void)prev;
  DCHECK_EQ(prev & kWriteLock, 0);
  DCHECK_GT(prev & ~kWriteLock, 0);
#if SANITIZER_DEBUG && !SANITIZER_GO
  cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}

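// Note: this only checks that the mutex is held by some thread (read- or
// write-locked), not that it is held by the calling thread.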
void Mutex::CheckLocked() {
  CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0);
}

}  // namespace __tsan