//===-- tsan_test_util_posix.cc -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Test utils, Linux, FreeBSD and Darwin implementation.
//===----------------------------------------------------------------------===//

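// A minimal usage sketch (for illustration only; the actual declarations live
// in tsan_test_util.h, and the constructor arguments shown here mirror the
// definitions further down in this file):
//
//   MemLoc l;
//   ScopedThread t1(false /*detached*/, false /*main*/);
//   ScopedThread t2(false /*detached*/, false /*main*/);
//   t1.Access(l.loc(), true /*is_write*/, 4, false /*expect_race*/);
//   t2.Access(l.loc(), true /*is_write*/, 4, true /*expect_race*/);
//
// Each operation is shipped to the corresponding worker thread through the
// Event machinery below, so the two accesses really happen on different
// threads and TSan can observe (and the test can expect) the race.
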
#include "sanitizer_common/sanitizer_atomic.h"
#include "tsan_interface.h"
#include "tsan_test_util.h"
#include "tsan_report.h"

#include "gtest/gtest.h"

#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>

using namespace __tsan;  // NOLINT

static __thread bool expect_report;
static __thread bool expect_report_reported;
static __thread ReportType expect_report_type;

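// On Darwin, TSan's interceptors are exported with a "wrap_" prefix instead
// of "__interceptor_", so map the names used throughout this file.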
#ifdef __APPLE__
#define __interceptor_memcpy wrap_memcpy
#define __interceptor_memset wrap_memset
#define __interceptor_pthread_create wrap_pthread_create
#define __interceptor_pthread_join wrap_pthread_join
#define __interceptor_pthread_detach wrap_pthread_detach
#define __interceptor_pthread_mutex_init wrap_pthread_mutex_init
#define __interceptor_pthread_mutex_lock wrap_pthread_mutex_lock
#define __interceptor_pthread_mutex_unlock wrap_pthread_mutex_unlock
#define __interceptor_pthread_mutex_destroy wrap_pthread_mutex_destroy
#define __interceptor_pthread_mutex_trylock wrap_pthread_mutex_trylock
#define __interceptor_pthread_rwlock_init wrap_pthread_rwlock_init
#define __interceptor_pthread_rwlock_destroy wrap_pthread_rwlock_destroy
#define __interceptor_pthread_rwlock_trywrlock wrap_pthread_rwlock_trywrlock
#define __interceptor_pthread_rwlock_wrlock wrap_pthread_rwlock_wrlock
#define __interceptor_pthread_rwlock_unlock wrap_pthread_rwlock_unlock
#define __interceptor_pthread_rwlock_rdlock wrap_pthread_rwlock_rdlock
#define __interceptor_pthread_rwlock_tryrdlock wrap_pthread_rwlock_tryrdlock
#endif

extern "C" void *__interceptor_memcpy(void *, const void *, uptr);
extern "C" void *__interceptor_memset(void *, int, uptr);
extern "C" int __interceptor_pthread_create(pthread_t *thread,
                                            const pthread_attr_t *attr,
                                            void *(*start_routine)(void *),
                                            void *arg);
extern "C" int __interceptor_pthread_join(pthread_t thread, void **value_ptr);
extern "C" int __interceptor_pthread_detach(pthread_t thread);

extern "C" int __interceptor_pthread_mutex_init(
    pthread_mutex_t *mutex, const pthread_mutexattr_t *attr);
extern "C" int __interceptor_pthread_mutex_lock(pthread_mutex_t *mutex);
extern "C" int __interceptor_pthread_mutex_unlock(pthread_mutex_t *mutex);
extern "C" int __interceptor_pthread_mutex_destroy(pthread_mutex_t *mutex);
extern "C" int __interceptor_pthread_mutex_trylock(pthread_mutex_t *mutex);

extern "C" int __interceptor_pthread_rwlock_init(
    pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
extern "C" int __interceptor_pthread_rwlock_destroy(pthread_rwlock_t *rwlock);
extern "C" int __interceptor_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock);
extern "C" int __interceptor_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
extern "C" int __interceptor_pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
extern "C" int __interceptor_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
extern "C" int __interceptor_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock);


static void *BeforeInitThread(void *param) {
  (void)param;
  return 0;
}

static void AtExit() {
}

void TestMutexBeforeInit() {
  // Mutexes must be usable before __tsan_init();
  pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
  __interceptor_pthread_mutex_lock(&mtx);
  __interceptor_pthread_mutex_unlock(&mtx);
  __interceptor_pthread_mutex_destroy(&mtx);
  pthread_t thr;
  __interceptor_pthread_create(&thr, 0, BeforeInitThread, 0);
  __interceptor_pthread_join(thr, 0);
  atexit(AtExit);
}

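// TSan invokes OnReport() for every report it is about to produce. The
// override below records whether the report matches what the current test
// expects and fails the test on unexpected or mismatched reports.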
namespace __tsan {
bool OnReport(const ReportDesc *rep, bool suppressed) {
  if (expect_report) {
    if (rep->typ != expect_report_type) {
      printf("Expected report of type %d, got type %d\n",
             (int)expect_report_type, (int)rep->typ);
      EXPECT_FALSE("Wrong report type");
      return false;
    }
  } else {
    EXPECT_FALSE("Unexpected report");
    return false;
  }
  expect_report_reported = true;
  return true;
}
}  // namespace __tsan

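// Simple bump allocator handing out unique addresses (growing from the
// address of a static variable) so that memory locations used by different
// tests and threads do not overlap.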
static void* allocate_addr(int size, int offset_from_aligned = 0) {
  static uintptr_t foo;
  static atomic_uintptr_t uniq = {(uintptr_t)&foo};  // Some real address.
  const int kAlign = 16;
  CHECK(offset_from_aligned < kAlign);
  size = (size + 2 * kAlign) & ~(kAlign - 1);
  uintptr_t addr = atomic_fetch_add(&uniq, size, memory_order_relaxed);
  return (void*)(addr + offset_from_aligned);
}

MemLoc::MemLoc(int offset_from_aligned)
  : loc_(allocate_addr(16, offset_from_aligned)) {
}

MemLoc::~MemLoc() {
}

Mutex::Mutex(Type type)
  : alive_()
  , type_(type) {
}

Mutex::~Mutex() {
  CHECK(!alive_);
}

void Mutex::Init() {
  CHECK(!alive_);
  alive_ = true;
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_init((pthread_mutex_t*)mtx_, 0), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_init((pthread_spinlock_t*)mtx_, 0), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_init((pthread_rwlock_t*)mtx_, 0), 0);
  else
    CHECK(0);
}

void Mutex::StaticInit() {
  CHECK(!alive_);
  CHECK(type_ == Normal);
  alive_ = true;
  pthread_mutex_t tmp = PTHREAD_MUTEX_INITIALIZER;
  memcpy(mtx_, &tmp, sizeof(tmp));
}

void Mutex::Destroy() {
  CHECK(alive_);
  alive_ = false;
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_destroy((pthread_mutex_t*)mtx_), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_destroy((pthread_spinlock_t*)mtx_), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_destroy((pthread_rwlock_t*)mtx_), 0);
}

void Mutex::Lock() {
  CHECK(alive_);
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_lock((pthread_mutex_t*)mtx_), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_lock((pthread_spinlock_t*)mtx_), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_wrlock((pthread_rwlock_t*)mtx_), 0);
}

bool Mutex::TryLock() {
  CHECK(alive_);
  if (type_ == Normal)
    return __interceptor_pthread_mutex_trylock((pthread_mutex_t*)mtx_) == 0;
#ifndef __APPLE__
  else if (type_ == Spin)
    return pthread_spin_trylock((pthread_spinlock_t*)mtx_) == 0;
#endif
  else if (type_ == RW)
    return __interceptor_pthread_rwlock_trywrlock((pthread_rwlock_t*)mtx_) == 0;
  return false;
}

void Mutex::Unlock() {
  CHECK(alive_);
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_unlock((pthread_mutex_t*)mtx_), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_unlock((pthread_spinlock_t*)mtx_), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0);
}

void Mutex::ReadLock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  CHECK_EQ(__interceptor_pthread_rwlock_rdlock((pthread_rwlock_t*)mtx_), 0);
}

bool Mutex::TryReadLock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  return __interceptor_pthread_rwlock_tryrdlock((pthread_rwlock_t*)mtx_) == 0;
}

void Mutex::ReadUnlock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  CHECK_EQ(__interceptor_pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0);
}

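// A single operation to be performed by a ScopedThread: a memory access,
// a mutex operation, a function entry/exit, or a memcpy/memset, plus the
// report type (if any) that the operation is expected to trigger.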
struct Event {
  enum Type {
    SHUTDOWN,
    READ,
    WRITE,
    VPTR_UPDATE,
    CALL,
    RETURN,
    MUTEX_CREATE,
    MUTEX_DESTROY,
    MUTEX_LOCK,
    MUTEX_TRYLOCK,
    MUTEX_UNLOCK,
    MUTEX_READLOCK,
    MUTEX_TRYREADLOCK,
    MUTEX_READUNLOCK,
    MEMCPY,
    MEMSET
  };
  Type type;
  void *ptr;
  uptr arg;
  uptr arg2;
  bool res;
  bool expect_report;
  ReportType report_type;

  Event(Type type, const void *ptr = 0, uptr arg = 0, uptr arg2 = 0)
    : type(type)
    , ptr(const_cast<void*>(ptr))
    , arg(arg)
    , arg2(arg2)
    , res()
    , expect_report()
    , report_type() {
  }

  void ExpectReport(ReportType type) {
    expect_report = true;
    report_type = type;
  }
};

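// Every non-main ScopedThread owns a worker pthread that spins on 'event'
// and executes the operations handed to it one at a time.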
struct ScopedThread::Impl {
  pthread_t thread;
  bool main;
  bool detached;
  atomic_uintptr_t event;  // Event*

  static void *ScopedThreadCallback(void *arg);
  void send(Event *ev);
  void HandleEvent(Event *ev);
};

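// Executes a single event on the calling thread and checks that the
// expected report, if any, was actually produced.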
void ScopedThread::Impl::HandleEvent(Event *ev) {
  CHECK_EQ(expect_report, false);
  expect_report = ev->expect_report;
  expect_report_reported = false;
  expect_report_type = ev->report_type;
  switch (ev->type) {
  case Event::READ:
  case Event::WRITE: {
    void (*tsan_mop)(void *addr) = 0;
    if (ev->type == Event::READ) {
      switch (ev->arg /*size*/) {
        case 1: tsan_mop = __tsan_read1; break;
        case 2: tsan_mop = __tsan_read2; break;
        case 4: tsan_mop = __tsan_read4; break;
        case 8: tsan_mop = __tsan_read8; break;
        case 16: tsan_mop = __tsan_read16; break;
      }
    } else {
      switch (ev->arg /*size*/) {
        case 1: tsan_mop = __tsan_write1; break;
        case 2: tsan_mop = __tsan_write2; break;
        case 4: tsan_mop = __tsan_write4; break;
        case 8: tsan_mop = __tsan_write8; break;
        case 16: tsan_mop = __tsan_write16; break;
      }
    }
    CHECK_NE(tsan_mop, 0);
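    // Pick an errno value the memory-access callbacks are unlikely to set
    // themselves; ECHRNG is Linux-only, so use ESOCKTNOSUPPORT on FreeBSD
    // and Darwin.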
#if defined(__FreeBSD__) || defined(__APPLE__)
    const int ErrCode = ESOCKTNOSUPPORT;
#else
    const int ErrCode = ECHRNG;
#endif
    errno = ErrCode;
    tsan_mop(ev->ptr);
    CHECK_EQ(ErrCode, errno);  // In no case must errno be changed.
    break;
  }
  case Event::VPTR_UPDATE:
    __tsan_vptr_update((void**)ev->ptr, (void*)ev->arg);
    break;
  case Event::CALL:
    __tsan_func_entry((void*)((uptr)ev->ptr));
    break;
  case Event::RETURN:
    __tsan_func_exit();
    break;
  case Event::MUTEX_CREATE:
    static_cast<Mutex*>(ev->ptr)->Init();
    break;
  case Event::MUTEX_DESTROY:
    static_cast<Mutex*>(ev->ptr)->Destroy();
    break;
  case Event::MUTEX_LOCK:
    static_cast<Mutex*>(ev->ptr)->Lock();
    break;
  case Event::MUTEX_TRYLOCK:
    ev->res = static_cast<Mutex*>(ev->ptr)->TryLock();
    break;
  case Event::MUTEX_UNLOCK:
    static_cast<Mutex*>(ev->ptr)->Unlock();
    break;
  case Event::MUTEX_READLOCK:
    static_cast<Mutex*>(ev->ptr)->ReadLock();
    break;
  case Event::MUTEX_TRYREADLOCK:
    ev->res = static_cast<Mutex*>(ev->ptr)->TryReadLock();
    break;
  case Event::MUTEX_READUNLOCK:
    static_cast<Mutex*>(ev->ptr)->ReadUnlock();
    break;
  case Event::MEMCPY:
    __interceptor_memcpy(ev->ptr, (void*)ev->arg, ev->arg2);
    break;
  case Event::MEMSET:
    __interceptor_memset(ev->ptr, ev->arg, ev->arg2);
    break;
  default: CHECK(0);
  }
  if (expect_report && !expect_report_reported) {
    printf("Missed expected report of type %d\n", (int)ev->report_type);
    EXPECT_FALSE("Missed expected race");
  }
  expect_report = false;
}

void *ScopedThread::Impl::ScopedThreadCallback(void *arg) {
  __tsan_func_entry(__builtin_return_address(0));
  Impl *impl = (Impl*)arg;
  for (;;) {
    Event* ev = (Event*)atomic_load(&impl->event, memory_order_acquire);
    if (ev == 0) {
      sched_yield();
      continue;
    }
    if (ev->type == Event::SHUTDOWN) {
      atomic_store(&impl->event, 0, memory_order_release);
      break;
    }
    impl->HandleEvent(ev);
    atomic_store(&impl->event, 0, memory_order_release);
  }
  __tsan_func_exit();
  return 0;
}

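// Hands one event to the worker thread and waits for it to finish: the
// pointer is published with a release store, and the worker resets the slot
// once the event has been handled. On the main "thread" the event is simply
// executed in place.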
void ScopedThread::Impl::send(Event *e) {
  if (main) {
    HandleEvent(e);
  } else {
    CHECK_EQ(atomic_load(&event, memory_order_relaxed), 0);
    atomic_store(&event, (uintptr_t)e, memory_order_release);
    while (atomic_load(&event, memory_order_acquire) != 0)
      sched_yield();
  }
}

ScopedThread::ScopedThread(bool detached, bool main) {
  impl_ = new Impl;
  impl_->main = main;
  impl_->detached = detached;
  atomic_store(&impl_->event, 0, memory_order_relaxed);
  if (!main) {
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(
        &attr, detached ? PTHREAD_CREATE_DETACHED : PTHREAD_CREATE_JOINABLE);
    pthread_attr_setstacksize(&attr, 64*1024);
    __interceptor_pthread_create(&impl_->thread, &attr,
        ScopedThread::Impl::ScopedThreadCallback, impl_);
  }
}

ScopedThread::~ScopedThread() {
  if (!impl_->main) {
    Event event(Event::SHUTDOWN);
    impl_->send(&event);
    if (!impl_->detached)
      __interceptor_pthread_join(impl_->thread, 0);
  }
  delete impl_;
}

void ScopedThread::Detach() {
  CHECK(!impl_->main);
  CHECK(!impl_->detached);
  impl_->detached = true;
  __interceptor_pthread_detach(impl_->thread);
}

void ScopedThread::Access(void *addr, bool is_write,
                          int size, bool expect_race) {
  Event event(is_write ? Event::WRITE : Event::READ, addr, size);
  if (expect_race)
    event.ExpectReport(ReportTypeRace);
  impl_->send(&event);
}

void ScopedThread::VptrUpdate(const MemLoc &vptr,
                              const MemLoc &new_val,
                              bool expect_race) {
  Event event(Event::VPTR_UPDATE, vptr.loc(), (uptr)new_val.loc());
  if (expect_race)
    event.ExpectReport(ReportTypeRace);
  impl_->send(&event);
}

void ScopedThread::Call(void(*pc)()) {
  Event event(Event::CALL, (void*)((uintptr_t)pc));
  impl_->send(&event);
}

void ScopedThread::Return() {
  Event event(Event::RETURN);
  impl_->send(&event);
}

void ScopedThread::Create(const Mutex &m) {
  Event event(Event::MUTEX_CREATE, &m);
  impl_->send(&event);
}

void ScopedThread::Destroy(const Mutex &m) {
  Event event(Event::MUTEX_DESTROY, &m);
  impl_->send(&event);
}

void ScopedThread::Lock(const Mutex &m) {
  Event event(Event::MUTEX_LOCK, &m);
  impl_->send(&event);
}

bool ScopedThread::TryLock(const Mutex &m) {
  Event event(Event::MUTEX_TRYLOCK, &m);
  impl_->send(&event);
  return event.res;
}

void ScopedThread::Unlock(const Mutex &m) {
  Event event(Event::MUTEX_UNLOCK, &m);
  impl_->send(&event);
}

void ScopedThread::ReadLock(const Mutex &m) {
  Event event(Event::MUTEX_READLOCK, &m);
  impl_->send(&event);
}

bool ScopedThread::TryReadLock(const Mutex &m) {
  Event event(Event::MUTEX_TRYREADLOCK, &m);
  impl_->send(&event);
  return event.res;
}

void ScopedThread::ReadUnlock(const Mutex &m) {
  Event event(Event::MUTEX_READUNLOCK, &m);
  impl_->send(&event);
}

void ScopedThread::Memcpy(void *dst, const void *src, int size,
                          bool expect_race) {
  Event event(Event::MEMCPY, dst, (uptr)src, size);
  if (expect_race)
    event.ExpectReport(ReportTypeRace);
  impl_->send(&event);
}

void ScopedThread::Memset(void *dst, int val, int size,
                          bool expect_race) {
  Event event(Event::MEMSET, dst, val, size);
  if (expect_race)
    event.ExpectReport(ReportTypeRace);
  impl_->send(&event);
}