//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);

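// The constructor only sets up the mutex; the rest of the state is
// initialized via Reset(0).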
SyncVar::SyncVar()
    : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
  Reset(0);
}

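// Attaches a freshly allocated SyncVar to an address: remembers the creation
// stack (not used by Go) and registers the mutex with the deadlock detector
// if deadlock detection is enabled.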
void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
  this->addr = addr;
  this->uid = uid;
  this->next = 0;

  creation_stack_id = 0;
  if (kCppMode)  // Go does not use them
    creation_stack_id = CurrentStackId(thr, pc);
  if (common_flags()->detect_deadlocks)
    DDMutexInit(thr, pc, this);
}

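// Returns the object to its pristine state. A null thr (as in the
// constructor) means there is no clock cache to release into and the vector
// clocks must already be empty; otherwise the clocks are released into
// thr's clock cache.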
void SyncVar::Reset(ThreadState *thr) {
  uid = 0;
  creation_stack_id = 0;
  owner_tid = kInvalidTid;
  last_lock = 0;
  recursion = 0;
  is_rw = 0;
  is_recursive = 0;
  is_broken = 0;
  is_linker_init = 0;

  if (thr == 0) {
    CHECK_EQ(clock.size(), 0);
    CHECK_EQ(read_clock.size(), 0);
  } else {
    clock.Reset(&thr->clock_cache);
    read_clock.Reset(&thr->clock_cache);
  }
}

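// uid_gen_ issues the unique ids that SyncVar::Init stores in each
// sync object.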
MetaMap::MetaMap() {
  atomic_store(&uid_gen_, 0, memory_order_relaxed);
}

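// Registers a heap block of sz bytes at address p: allocates an MBlock
// describing the allocation (size, allocating thread, allocation stack)
// and stores its index in the meta shadow cell for p.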
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 idx = block_alloc_.Alloc(&thr->block_cache);
  MBlock *b = block_alloc_.Map(idx);
  b->siz = sz;
  b->tid = thr->tid;
  b->stk = CurrentStackId(thr, pc);
  u32 *meta = MemToMeta(p);
  DCHECK_EQ(*meta, 0);
  *meta = idx | kFlagBlock;
}

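// Frees the block starting at p together with any sync objects inside it.
// Returns the block size rounded up to the meta shadow granularity,
// or 0 if there is no block at p.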
uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
  MBlock* b = GetBlock(p);
  if (b == 0)
    return 0;
  uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
  FreeRange(thr, pc, p, sz);
  return sz;
}

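// Frees all meta objects (blocks and sync objects) whose meta shadow cells
// fall into [p, p+sz). Returns true if anything was freed.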
bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  bool has_something = false;
  u32 *meta = MemToMeta(p);
  u32 *end = MemToMeta(p + sz);
  if (end == meta)
    end++;
  for (; meta < end; meta++) {
    u32 idx = *meta;
    if (idx == 0) {
      // Note: don't write to meta in this case -- the block can be huge.
      continue;
    }
    *meta = 0;
    has_something = true;
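    // Each cell holds a chain: zero or more sync objects linked via next,
    // optionally terminated by a block index.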
    while (idx != 0) {
      if (idx & kFlagBlock) {
        block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
        break;
      } else if (idx & kFlagSync) {
        DCHECK(idx & kFlagSync);
        SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
        u32 next = s->next;
        s->Reset(thr);
        sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
        idx = next;
      } else {
        CHECK(0);
      }
    }
  }
  return has_something;
}

// ResetRange removes all meta objects from the range.
// It is called for large mmap-ed regions. The function is best-effort wrt
// freeing of meta objects, because we don't want to page in the whole range,
// which can be huge. The function probes pages one-by-one until it finds a
// page without meta objects; at this point it stops freeing meta objects.
// Because thread stacks grow top-down, we do the same starting from the end
// as well.
void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
  const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
  if (sz <= 4 * kPageSize) {
    // If the range is small, just do the normal free procedure.
    FreeRange(thr, pc, p, sz);
    return;
  }
  // First, round both ends of the range to page size.
  uptr diff = RoundUp(p, kPageSize) - p;
  if (diff != 0) {
    FreeRange(thr, pc, p, diff);
    p += diff;
    sz -= diff;
  }
  diff = p + sz - RoundDown(p + sz, kPageSize);
  if (diff != 0) {
    FreeRange(thr, pc, p + sz - diff, diff);
    sz -= diff;
  }
  // Now we must have a non-empty page-aligned range.
  CHECK_GT(sz, 0);
  CHECK_EQ(p, RoundUp(p, kPageSize));
  CHECK_EQ(sz, RoundUp(sz, kPageSize));
  const uptr p0 = p;
  const uptr sz0 = sz;
  // Probe start of the range.
  while (sz > 0) {
    bool has_something = FreeRange(thr, pc, p, kPageSize);
    p += kPageSize;
    sz -= kPageSize;
    if (!has_something)
      break;
  }
  // Probe end of the range.
  while (sz > 0) {
    bool has_something = FreeRange(thr, pc, p + sz - kPageSize, kPageSize);
    sz -= kPageSize;
    if (!has_something)
      break;
  }
  // Finally, page out the whole range (including the parts that we've just
  // freed). Note: we can't simply madvise, because we need to leave a zeroed
  // range (otherwise __tsan_java_move can crash if it encounters left-over
  // meta objects in the Java heap).
  uptr metap = (uptr)MemToMeta(p0);
  uptr metasz = sz0 / kMetaRatio;
  UnmapOrDie((void*)metap, metasz);
  MmapFixedNoReserve(metap, metasz);
}

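// Returns the MBlock that covers address p (0 if there is none),
// skipping over any sync objects chained in front of it.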
MBlock* MetaMap::GetBlock(uptr p) {
  u32 *meta = MemToMeta(p);
  u32 idx = *meta;
  for (;;) {
    if (idx == 0)
      return 0;
    if (idx & kFlagBlock)
      return block_alloc_.Map(idx & ~kFlagMask);
    DCHECK(idx & kFlagSync);
    SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
    idx = s->next;
  }
}

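// Thin wrappers around GetAndLock: the first creates the sync object if it
// does not exist yet, the second only looks it up (and always write-locks it).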
SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                              uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) {
  return GetAndLock(0, 0, addr, true, false);
}

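// Walks the meta chain for addr looking for a SyncVar with that address.
// If none is found and create is set, a new SyncVar is allocated and
// prepended to the chain with a CAS; on CAS failure the chain is re-scanned
// and the speculatively allocated object is reused or released.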
SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
  u32 *meta = MemToMeta(addr);
  u32 idx0 = *meta;
  u32 myidx = 0;
  SyncVar *mys = 0;
  for (;;) {
    u32 idx = idx0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock)
        break;
      DCHECK(idx & kFlagSync);
      SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
      if (s->addr == addr) {
        if (myidx != 0) {
          mys->Reset(thr);
          sync_alloc_.Free(&thr->sync_cache, myidx);
        }
        if (write_lock)
          s->mtx.Lock();
        else
          s->mtx.ReadLock();
        return s;
      }
      idx = s->next;
    }
    if (!create)
      return 0;
    if (*meta != idx0) {
      idx0 = *meta;
      continue;
    }

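    // The address has no SyncVar yet. Allocate one (only once across
    // retries) and try to prepend it to the chain.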
    if (myidx == 0) {
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      myidx = sync_alloc_.Alloc(&thr->sync_cache);
      mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, uid);
    }
    mys->next = idx0;
    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
        myidx | kFlagSync, memory_order_release)) {
      if (write_lock)
        mys->mtx.Lock();
      else
        mys->mtx.ReadLock();
      return mys;
    }
  }
}

void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // src and dst can overlap; there must be no concurrent accesses to the
  // regions (e.g. the caller has stopped the world).
  CHECK_NE(src, dst);
  CHECK_NE(sz, 0);
  uptr diff = dst - src;
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
  uptr inc = 1;
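  // When moving towards a higher address, iterate backwards so that
  // overlapping source cells are read before they are overwritten.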
  if (dst > src) {
    src_meta = MemToMeta(src + sz) - 1;
    dst_meta = MemToMeta(dst + sz) - 1;
    src_meta_end = MemToMeta(src) - 1;
    inc = -1;
  }
  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
    CHECK_EQ(*dst_meta, 0);
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}

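// Called when a thread goes idle: flushes the thread's block/sync allocator
// caches back to the shared pools so the cached indices can be reused.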
void MetaMap::OnThreadIdle(ThreadState *thr) {
  block_alloc_.FlushCache(&thr->block_cache);
  sync_alloc_.FlushCache(&thr->sync_cache);
}

}  // namespace __tsan