1 //===-- tsan_rtl.cc -------------------------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file is a part of ThreadSanitizer (TSan), a race detector.
11 //
12 // Main file (entry points) for the TSan run-time.
13 //===----------------------------------------------------------------------===//
14
15 #include "sanitizer_common/sanitizer_atomic.h"
16 #include "sanitizer_common/sanitizer_common.h"
17 #include "sanitizer_common/sanitizer_libc.h"
18 #include "sanitizer_common/sanitizer_stackdepot.h"
19 #include "sanitizer_common/sanitizer_placement_new.h"
20 #include "sanitizer_common/sanitizer_symbolizer.h"
21 #include "tsan_defs.h"
22 #include "tsan_platform.h"
23 #include "tsan_rtl.h"
24 #include "tsan_mman.h"
25 #include "tsan_suppressions.h"
26 #include "tsan_symbolize.h"
27
28 #ifdef __SSE3__
29 // <emmintrin.h> transitively includes <stdlib.h>,
30 // and it's prohibited to include std headers into the tsan runtime.
31 // So we do this dirty trick.
32 #define _MM_MALLOC_H_INCLUDED
33 #define __MM_MALLOC_H
34 #include <emmintrin.h>
35 typedef __m128i m128;
36 #endif
37
38 volatile int __tsan_resumed = 0;
39
40 extern "C" void __tsan_resume() {
41 __tsan_resumed = 1;
42 }
43
44 namespace __tsan {
45
46 #ifndef SANITIZER_GO
47 THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
48 #endif
49 static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
50 Context *ctx;
51
52 // Can be overridden by a front-end.
53 #ifdef TSAN_EXTERNAL_HOOKS
54 bool OnFinalize(bool failed);
55 void OnInitialize();
56 #else
57 SANITIZER_INTERFACE_ATTRIBUTE
58 bool WEAK OnFinalize(bool failed) {
59 return failed;
60 }
61 SANITIZER_INTERFACE_ATTRIBUTE
62 void WEAK OnInitialize() {}
63 #endif
64
65 static char thread_registry_placeholder[sizeof(ThreadRegistry)];
66
67 static ThreadContextBase *CreateThreadContext(u32 tid) {
68 // Map thread trace when context is created.
69 MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
70 const uptr hdr = GetThreadTraceHeader(tid);
71 MapThreadTrace(hdr, sizeof(Trace));
72 new((void*)hdr) Trace();
73 // We are going to use only a small part of the trace with the default
74 // value of history_size. However, the constructor writes to the whole trace.
75 // Unmap the unused part.
76 uptr hdr_end = hdr + sizeof(Trace);
77 hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
78 hdr_end = RoundUp(hdr_end, GetPageSizeCached());
79 if (hdr_end < hdr + sizeof(Trace))
80 UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end);
81 void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
82 return new(mem) ThreadContext(tid);
83 }
84
85 #ifndef SANITIZER_GO
86 static const u32 kThreadQuarantineSize = 16;
87 #else
88 static const u32 kThreadQuarantineSize = 64;
89 #endif
90
91 Context::Context()
92 : initialized()
93 , report_mtx(MutexTypeReport, StatMtxReport)
94 , nreported()
95 , nmissed_expected()
96 , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
97 CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
98 , racy_stacks(MBlockRacyStacks)
99 , racy_addresses(MBlockRacyAddresses)
100 , fired_suppressions(8) {
101 }
102
103 // The objects are allocated in TLS, so one may rely on zero-initialization.
104 ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
105 unsigned reuse_count,
106 uptr stk_addr, uptr stk_size,
107 uptr tls_addr, uptr tls_size)
108 : fast_state(tid, epoch)
109 // Do not touch these, rely on zero initialization,
110 // they may be accessed before the ctor.
111 // , ignore_reads_and_writes()
112 // , ignore_interceptors()
113 , clock(tid, reuse_count)
114 #ifndef SANITIZER_GO
115 , jmp_bufs(MBlockJmpBuf)
116 #endif
117 , tid(tid)
118 , unique_id(unique_id)
119 , stk_addr(stk_addr)
120 , stk_size(stk_size)
121 , tls_addr(tls_addr)
122 , tls_size(tls_size)
123 #ifndef SANITIZER_GO
124 , last_sleep_clock(tid)
125 #endif
126 {
127 }
128
129 #ifndef SANITIZER_GO
130 static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
131 uptr n_threads;
132 uptr n_running_threads;
133 ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
134 InternalScopedBuffer<char> buf(4096);
135 WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
136 WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
137 }
138
139 static void BackgroundThread(void *arg) {
140 // This is a non-initialized non-user thread, nothing to see here.
141 // We don't use ScopedIgnoreInterceptors, because we want ignores to be
142 // enabled even when the thread function exits (e.g. during pthread thread
143 // shutdown code).
144 cur_thread()->ignore_interceptors++;
145 const u64 kMs2Ns = 1000 * 1000;
146
147 fd_t mprof_fd = kInvalidFd;
148 if (flags()->profile_memory && flags()->profile_memory[0]) {
149 if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
150 mprof_fd = 1;
151 } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
152 mprof_fd = 2;
153 } else {
154 InternalScopedString filename(kMaxPathLength);
155 filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
156 fd_t fd = OpenFile(filename.data(), WrOnly);
157 if (fd == kInvalidFd) {
158 Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
159 &filename[0]);
160 } else {
161 mprof_fd = fd;
162 }
163 }
164 }
165
166 u64 last_flush = NanoTime();
167 uptr last_rss = 0;
168 for (int i = 0;
169 atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
170 i++) {
171 SleepForMillis(100);
172 u64 now = NanoTime();
173
174 // Flush memory if requested.
175 if (flags()->flush_memory_ms > 0) {
176 if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
177 VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
178 FlushShadowMemory();
179 last_flush = NanoTime();
180 }
181 }
182 // GetRSS can be expensive on huge programs, so don't do it every 100ms.
183 if (flags()->memory_limit_mb > 0) {
184 uptr rss = GetRSS();
185 uptr limit = uptr(flags()->memory_limit_mb) << 20;
186 VPrintf(1, "ThreadSanitizer: memory flush check"
187 " RSS=%llu LAST=%llu LIMIT=%llu\n",
188 (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
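// The condition below is rss - last_rss > limit - rss rearranged: flush when
// RSS has grown by more than the remaining headroom to the limit since the
// last check.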
189 if (2 * rss > limit + last_rss) {
190 VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
191 FlushShadowMemory();
192 rss = GetRSS();
193 VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
194 }
195 last_rss = rss;
196 }
197
198 // Write memory profile if requested.
199 if (mprof_fd != kInvalidFd)
200 MemoryProfiler(ctx, mprof_fd, i);
201
202 // Flush symbolizer cache if requested.
203 if (flags()->flush_symbolizer_ms > 0) {
204 u64 last = atomic_load(&ctx->last_symbolize_time_ns,
205 memory_order_relaxed);
206 if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
207 Lock l(&ctx->report_mtx);
208 SpinMutexLock l2(&CommonSanitizerReportMutex);
209 SymbolizeFlush();
210 atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
211 }
212 }
213 }
214 }
215
216 static void StartBackgroundThread() {
217 ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
218 }
219
220 #ifndef __mips__
221 static void StopBackgroundThread() {
222 atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
223 internal_join_thread(ctx->background_thread);
224 ctx->background_thread = 0;
225 }
226 #endif
227 #endif
228
229 void DontNeedShadowFor(uptr addr, uptr size) {
230 uptr shadow_beg = MemToShadow(addr);
231 uptr shadow_end = MemToShadow(addr + size);
232 FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
233 }
234
235 void MapShadow(uptr addr, uptr size) {
236 // Global data is not 64K aligned, but there are no adjacent mappings,
237 // so we can get away with unaligned mapping.
238 // CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
239 MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
240
241 // Meta shadow is 2:1, so tread carefully.
242 static bool data_mapped = false;
243 static uptr mapped_meta_end = 0;
244 uptr meta_begin = (uptr)MemToMeta(addr);
245 uptr meta_end = (uptr)MemToMeta(addr + size);
246 meta_begin = RoundDownTo(meta_begin, 64 << 10);
247 meta_end = RoundUpTo(meta_end, 64 << 10);
248 if (!data_mapped) {
249 // First call maps data+bss.
250 data_mapped = true;
251 MmapFixedNoReserve(meta_begin, meta_end - meta_begin);
252 } else {
253 // Mapping continuous heap.
254 // Windows wants 64K alignment.
255 meta_begin = RoundDownTo(meta_begin, 64 << 10);
256 meta_end = RoundUpTo(meta_end, 64 << 10);
257 if (meta_end <= mapped_meta_end)
258 return;
259 if (meta_begin < mapped_meta_end)
260 meta_begin = mapped_meta_end;
261 MmapFixedNoReserve(meta_begin, meta_end - meta_begin);
262 mapped_meta_end = meta_end;
263 }
264 VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
265 addr, addr+size, meta_begin, meta_end);
266 }
267
268 void MapThreadTrace(uptr addr, uptr size) {
269 DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
270 CHECK_GE(addr, kTraceMemBeg);
271 CHECK_LE(addr + size, kTraceMemEnd);
272 CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
273 uptr addr1 = (uptr)MmapFixedNoReserve(addr, size);
274 if (addr1 != addr) {
275 Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p->%p)\n",
276 addr, size, addr1);
277 Die();
278 }
279 }
280
281 static void CheckShadowMapping() {
282 for (uptr i = 0; i < ARRAY_SIZE(UserRegions); i += 2) {
283 const uptr beg = UserRegions[i];
284 const uptr end = UserRegions[i + 1];
285 VPrintf(3, "checking shadow region %p-%p\n", beg, end);
286 for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
287 for (int x = -1; x <= 1; x++) {
288 const uptr p = p0 + x;
289 if (p < beg || p >= end)
290 continue;
291 const uptr s = MemToShadow(p);
292 const uptr m = (uptr)MemToMeta(p);
293 VPrintf(3, " checking pointer %p: shadow=%p meta=%p\n", p, s, m);
294 CHECK(IsAppMem(p));
295 CHECK(IsShadowMem(s));
296 CHECK_EQ(p & ~(kShadowCell - 1), ShadowToMem(s));
297 CHECK(IsMetaMem(m));
298 }
299 }
300 }
301 }
302
303 void Initialize(ThreadState *thr) {
304 // Thread safe because done before all threads exist.
305 static bool is_initialized = false;
306 if (is_initialized)
307 return;
308 is_initialized = true;
309 // We are not ready to handle interceptors yet.
310 ScopedIgnoreInterceptors ignore;
311 SanitizerToolName = "ThreadSanitizer";
312 // Install tool-specific callbacks in sanitizer_common.
313 SetCheckFailedCallback(TsanCheckFailed);
314
315 ctx = new(ctx_placeholder) Context;
316 const char *options = GetEnv(kTsanOptionsEnv);
317 InitializeFlags(&ctx->flags, options);
318 #ifndef SANITIZER_GO
319 InitializeAllocator();
320 #endif
321 InitializeInterceptors();
322 CheckShadowMapping();
323 InitializePlatform();
324 InitializeMutex();
325 InitializeDynamicAnnotations();
326 #ifndef SANITIZER_GO
327 InitializeShadowMemory();
328 #endif
329 // Setup correct file descriptor for error reports.
330 __sanitizer_set_report_path(common_flags()->log_path);
331 InitializeSuppressions();
332 #ifndef SANITIZER_GO
333 InitializeLibIgnore();
334 Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
335 // On MIPS, TSan initialization is run before
336 // __pthread_initialize_minimal_internal() is finished, so we can not spawn
337 // new threads.
338 #ifndef __mips__
339 StartBackgroundThread();
340 SetSandboxingCallback(StopBackgroundThread);
341 #endif
342 #endif
343 if (common_flags()->detect_deadlocks)
344 ctx->dd = DDetector::Create(flags());
345
346 VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
347 (int)internal_getpid());
348
349 // Initialize thread 0.
350 int tid = ThreadCreate(thr, 0, 0, true);
351 CHECK_EQ(tid, 0);
352 ThreadStart(thr, tid, internal_getpid());
353 ctx->initialized = true;
354
355 if (flags()->stop_on_start) {
356 Printf("ThreadSanitizer is suspended at startup (pid %d)."
357 " Call __tsan_resume().\n",
358 (int)internal_getpid());
359 while (__tsan_resumed == 0) {}
360 }
361
362 OnInitialize();
363 }
364
365 int Finalize(ThreadState *thr) {
366 bool failed = false;
367
368 if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
369 SleepForMillis(flags()->atexit_sleep_ms);
370
371 // Wait for pending reports.
372 ctx->report_mtx.Lock();
373 CommonSanitizerReportMutex.Lock();
374 CommonSanitizerReportMutex.Unlock();
375 ctx->report_mtx.Unlock();
376
377 #ifndef SANITIZER_GO
378 if (Verbosity()) AllocatorPrintStats();
379 #endif
380
381 ThreadFinalize(thr);
382
383 if (ctx->nreported) {
384 failed = true;
385 #ifndef SANITIZER_GO
386 Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
387 #else
388 Printf("Found %d data race(s)\n", ctx->nreported);
389 #endif
390 }
391
392 if (ctx->nmissed_expected) {
393 failed = true;
394 Printf("ThreadSanitizer: missed %d expected races\n",
395 ctx->nmissed_expected);
396 }
397
398 if (common_flags()->print_suppressions)
399 PrintMatchedSuppressions();
400 #ifndef SANITIZER_GO
401 if (flags()->print_benign)
402 PrintMatchedBenignRaces();
403 #endif
404
405 failed = OnFinalize(failed);
406
407 #if TSAN_COLLECT_STATS
408 StatAggregate(ctx->stat, thr->stat);
409 StatOutput(ctx->stat);
410 #endif
411
412 return failed ? flags()->exitcode : 0;
413 }
414
415 #ifndef SANITIZER_GO
416 void ForkBefore(ThreadState *thr, uptr pc) {
417 ctx->thread_registry->Lock();
418 ctx->report_mtx.Lock();
419 }
420
421 void ForkParentAfter(ThreadState *thr, uptr pc) {
422 ctx->report_mtx.Unlock();
423 ctx->thread_registry->Unlock();
424 }
425
426 void ForkChildAfter(ThreadState *thr, uptr pc) {
427 ctx->report_mtx.Unlock();
428 ctx->thread_registry->Unlock();
429
430 uptr nthread = 0;
431 ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
432 VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
433 " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
434 if (nthread == 1) {
435 StartBackgroundThread();
436 } else {
437 // We've just forked a multi-threaded process. We cannot reasonably function
438 // after that (some mutexes may be locked before fork). So just enable
439 // ignores for everything in the hope that we will exec soon.
440 ctx->after_multithreaded_fork = true;
441 thr->ignore_interceptors++;
442 ThreadIgnoreBegin(thr, pc);
443 ThreadIgnoreSyncBegin(thr, pc);
444 }
445 }
446 #endif
447
448 #ifdef SANITIZER_GO
449 NOINLINE
450 void GrowShadowStack(ThreadState *thr) {
451 const int sz = thr->shadow_stack_end - thr->shadow_stack;
452 const int newsz = 2 * sz;
453 uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
454 newsz * sizeof(uptr));
455 internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
456 internal_free(thr->shadow_stack);
457 thr->shadow_stack = newstack;
458 thr->shadow_stack_pos = newstack + sz;
459 thr->shadow_stack_end = newstack + newsz;
460 }
461 #endif
462
463 u32 CurrentStackId(ThreadState *thr, uptr pc) {
464 if (!thr->is_inited) // May happen during bootstrap.
465 return 0;
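// If pc is given, temporarily push it onto the shadow stack so that it is
// included in the collected stack id; it is popped again below.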
466 if (pc != 0) {
467 #ifndef SANITIZER_GO
468 DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
469 #else
470 if (thr->shadow_stack_pos == thr->shadow_stack_end)
471 GrowShadowStack(thr);
472 #endif
473 thr->shadow_stack_pos[0] = pc;
474 thr->shadow_stack_pos++;
475 }
476 u32 id = StackDepotPut(
477 StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
478 if (pc != 0)
479 thr->shadow_stack_pos--;
480 return id;
481 }
482
483 void TraceSwitch(ThreadState *thr) {
484 thr->nomalloc++;
485 Trace *thr_trace = ThreadTrace(thr->tid);
486 Lock l(&thr_trace->mtx);
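// The trace is a circular buffer of TraceParts() parts of kTracePartSize
// events each; the current epoch selects the part whose header is being
// (re)initialized for the upcoming events.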
487 unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
488 TraceHeader *hdr = &thr_trace->headers[trace];
489 hdr->epoch0 = thr->fast_state.epoch();
490 ObtainCurrentStack(thr, 0, &hdr->stack0);
491 hdr->mset0 = thr->mset;
492 thr->nomalloc--;
493 }
494
495 Trace *ThreadTrace(int tid) {
496 return (Trace*)GetThreadTraceHeader(tid);
497 }
498
499 uptr TraceTopPC(ThreadState *thr) {
500 Event *events = (Event*)GetThreadTrace(thr->tid);
501 uptr pc = events[thr->fast_state.GetTracePos()];
502 return pc;
503 }
504
505 uptr TraceSize() {
506 return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
507 }
508
509 uptr TraceParts() {
510 return TraceSize() / kTracePartSize;
511 }
512
513 #ifndef SANITIZER_GO
514 extern "C" void __tsan_trace_switch() {
515 TraceSwitch(cur_thread());
516 }
517
518 extern "C" void __tsan_report_race() {
519 ReportRace(cur_thread());
520 }
521 #endif
522
523 ALWAYS_INLINE
524 Shadow LoadShadow(u64 *p) {
525 u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
526 return Shadow(raw);
527 }
528
529 ALWAYS_INLINE
530 void StoreShadow(u64 *sp, u64 s) {
531 atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
532 }
533
534 ALWAYS_INLINE
535 void StoreIfNotYetStored(u64 *sp, u64 *s) {
536 StoreShadow(sp, *s);
537 *s = 0;
538 }
539
540 ALWAYS_INLINE
541 void HandleRace(ThreadState *thr, u64 *shadow_mem,
542 Shadow cur, Shadow old) {
543 thr->racy_state[0] = cur.raw();
544 thr->racy_state[1] = old.raw();
545 thr->racy_shadow_addr = shadow_mem;
546 #ifndef SANITIZER_GO
547 HACKY_CALL(__tsan_report_race);
548 #else
549 ReportRace(thr);
550 #endif
551 }
552
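// The old access happens-before the current one if the current thread's
// vector clock has already acquired at least old's epoch for old's thread.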
553 static inline bool HappensBefore(Shadow old, ThreadState *thr) {
554 return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
555 }
556
557 ALWAYS_INLINE
558 void MemoryAccessImpl1(ThreadState *thr, uptr addr,
559 int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
560 u64 *shadow_mem, Shadow cur) {
561 StatInc(thr, StatMop);
562 StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
563 StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
564
565 // This potentially can live in an MMX/SSE scratch register.
566 // The required intrinsics are:
567 // __m128i _mm_move_epi64(__m128i*);
568 // _mm_storel_epi64(u64*, __m128i);
569 u64 store_word = cur.raw();
570
571 // scan all the shadow values and dispatch to 4 categories:
572 // same, replace, candidate and race (see comments below).
573 // we consider only 3 cases regarding access sizes:
574 // equal, intersect and not intersect. initially I considered
575 // larger and smaller as well, which allowed replacing some
576 // 'candidates' with 'same' or 'replace', but I think
577 // it's just not worth it (performance- and complexity-wise).
578
579 Shadow old(0);
580
581 // In release mode we manually unroll the loop,
582 // because empirically gcc generates better code this way.
583 // However, we can't afford unrolling in debug mode, because the function
584 // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
585 // threads, which is not enough for the unrolled loop.
586 #if SANITIZER_DEBUG
587 for (int idx = 0; idx < 4; idx++) {
588 #include "tsan_update_shadow_word_inl.h"
589 }
590 #else
591 int idx = 0;
592 #include "tsan_update_shadow_word_inl.h"
593 idx = 1;
594 #include "tsan_update_shadow_word_inl.h"
595 idx = 2;
596 #include "tsan_update_shadow_word_inl.h"
597 idx = 3;
598 #include "tsan_update_shadow_word_inl.h"
599 #endif
600
601 // we did not find any races and had already stored
602 // the current access info, so we are done
603 if (LIKELY(store_word == 0))
604 return;
605 // choose a random candidate slot and replace it
606 StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
607 StatInc(thr, StatShadowReplace);
608 return;
609 RACE:
610 HandleRace(thr, shadow_mem, cur, old);
611 return;
612 }
613
614 void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
615 int size, bool kAccessIsWrite, bool kIsAtomic) {
616 while (size) {
617 int size1 = 1;
618 int kAccessSizeLog = kSizeLog1;
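// Pick the largest access size whose byte range [addr, addr+size1) stays
// within a single 8-byte shadow cell, i.e. the first and last byte share the
// same value of (addr & ~7).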
619 if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
620 size1 = 8;
621 kAccessSizeLog = kSizeLog8;
622 } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
623 size1 = 4;
624 kAccessSizeLog = kSizeLog4;
625 } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
626 size1 = 2;
627 kAccessSizeLog = kSizeLog2;
628 }
629 MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
630 addr += size1;
631 size -= size1;
632 }
633 }
634
635 ALWAYS_INLINE
636 bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
637 Shadow cur(a);
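// A stored shadow value makes the current access redundant if it was made by
// the same thread to the same addr0/size after the last synchronization
// (epoch > sync_epoch), has the same atomicity, and is at least as strong
// (a stored write also covers a later read).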
638 for (uptr i = 0; i < kShadowCnt; i++) {
639 Shadow old(LoadShadow(&s[i]));
640 if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
641 old.TidWithIgnore() == cur.TidWithIgnore() &&
642 old.epoch() > sync_epoch &&
643 old.IsAtomic() == cur.IsAtomic() &&
644 old.IsRead() <= cur.IsRead())
645 return true;
646 }
647 return false;
648 }
649
650 #if defined(__SSE3__)
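// SHUF(v0, v1, i0, i1, i2, i3) selects 32-bit lanes: result[0]=v0[i0],
// result[1]=v0[i1], result[2]=v1[i2], result[3]=v1[i3]. Each index occupies
// two bits of the _mm_shuffle_ps immediate, hence the *1/*4/*16/*64 weights.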
651 #define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
652 _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
653 (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
654 ALWAYS_INLINE
655 bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
656 // This is an optimized version of ContainsSameAccessSlow.
657 // load current access into access[0:63]
658 const m128 access = _mm_cvtsi64_si128(a);
659 // duplicate high part of access in addr0:
660 // addr0[0:31] = access[32:63]
661 // addr0[32:63] = access[32:63]
662 // addr0[64:95] = access[32:63]
663 // addr0[96:127] = access[32:63]
664 const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
665 // load 4 shadow slots
666 const m128 shadow0 = _mm_load_si128((__m128i*)s);
667 const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
668 // load high parts of 4 shadow slots into addr_vect:
669 // addr_vect[0:31] = shadow0[32:63]
670 // addr_vect[32:63] = shadow0[96:127]
671 // addr_vect[64:95] = shadow1[32:63]
672 // addr_vect[96:127] = shadow1[96:127]
673 m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
674 if (!is_write) {
675 // set IsRead bit in addr_vect
676 const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
677 const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
678 addr_vect = _mm_or_si128(addr_vect, rw_mask);
679 }
680 // addr0 == addr_vect?
681 const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
682 // epoch1[0:63] = sync_epoch
683 const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
684 // epoch[0:31] = sync_epoch[0:31]
685 // epoch[32:63] = sync_epoch[0:31]
686 // epoch[64:95] = sync_epoch[0:31]
687 // epoch[96:127] = sync_epoch[0:31]
688 const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
689 // load low parts of shadow cell epochs into epoch_vect:
690 // epoch_vect[0:31] = shadow0[0:31]
691 // epoch_vect[32:63] = shadow0[64:95]
692 // epoch_vect[64:95] = shadow1[0:31]
693 // epoch_vect[96:127] = shadow1[64:95]
694 const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
695 // epoch_vect >= sync_epoch?
696 const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
697 // addr_res & epoch_res
698 const m128 res = _mm_and_si128(addr_res, epoch_res);
699 // mask[0] = res[7]
700 // mask[1] = res[15]
701 // ...
702 // mask[15] = res[127]
703 const int mask = _mm_movemask_epi8(res);
704 return mask != 0;
705 }
706 #endif
707
708 ALWAYS_INLINE
709 bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
710 #if defined(__SSE3__)
711 bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
712 // NOTE: this check can fail if the shadow is concurrently mutated
713 // by other threads. But it still can be useful if you modify
714 // ContainsSameAccessFast and want to ensure that it's not completely broken.
715 // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
716 return res;
717 #else
718 return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
719 #endif
720 }
721
722 ALWAYS_INLINE USED
723 void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
724 int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
725 u64 *shadow_mem = (u64*)MemToShadow(addr);
726 DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
727 " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
728 (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
729 (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
730 (uptr)shadow_mem[0], (uptr)shadow_mem[1],
731 (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
732 #if SANITIZER_DEBUG
733 if (!IsAppMem(addr)) {
734 Printf("Access to non app mem %zx\n", addr);
735 DCHECK(IsAppMem(addr));
736 }
737 if (!IsShadowMem((uptr)shadow_mem)) {
738 Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
739 DCHECK(IsShadowMem((uptr)shadow_mem));
740 }
741 #endif
742
743 if (kCppMode && *shadow_mem == kShadowRodata) {
744 // Access to .rodata section, no races here.
745 // Measurements show that it can be 10-20% of all memory accesses.
746 StatInc(thr, StatMop);
747 StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
748 StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
749 StatInc(thr, StatMopRodata);
750 return;
751 }
752
753 FastState fast_state = thr->fast_state;
754 if (fast_state.GetIgnoreBit()) {
755 StatInc(thr, StatMop);
756 StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
757 StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
758 StatInc(thr, StatMopIgnored);
759 return;
760 }
761
762 Shadow cur(fast_state);
763 cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
764 cur.SetWrite(kAccessIsWrite);
765 cur.SetAtomic(kIsAtomic);
766
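// Fast path: if some shadow slot already records an equal-or-stronger access
// by this thread made after the last synchronization, this access cannot add
// any new information and no race check is needed.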
767 if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
768 thr->fast_synch_epoch, kAccessIsWrite))) {
769 StatInc(thr, StatMop);
770 StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
771 StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
772 StatInc(thr, StatMopSame);
773 return;
774 }
775
776 if (kCollectHistory) {
777 fast_state.IncrementEpoch();
778 thr->fast_state = fast_state;
779 TraceAddEvent(thr, fast_state, EventTypeMop, pc);
780 cur.IncrementEpoch();
781 }
782
783 MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
784 shadow_mem, cur);
785 }
786
787 // Called by MemoryAccessRange in tsan_rtl_thread.cc
788 ALWAYS_INLINE USED
789 void MemoryAccessImpl(ThreadState *thr, uptr addr,
790 int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
791 u64 *shadow_mem, Shadow cur) {
792 if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
793 thr->fast_synch_epoch, kAccessIsWrite))) {
794 StatInc(thr, StatMop);
795 StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
796 StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
797 StatInc(thr, StatMopSame);
798 return;
799 }
800
801 MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
802 shadow_mem, cur);
803 }
804
805 static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
806 u64 val) {
807 (void)thr;
808 (void)pc;
809 if (size == 0)
810 return;
811 // FIXME: fix me.
812 uptr offset = addr % kShadowCell;
813 if (offset) {
814 offset = kShadowCell - offset;
815 if (size <= offset)
816 return;
817 addr += offset;
818 size -= offset;
819 }
820 DCHECK_EQ(addr % 8, 0);
821 // If a user passes some insane arguments (memset(0)),
822 // let it just crash as usual.
823 if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
824 return;
825 // Don't want to touch lots of shadow memory.
826 // If a program maps 10MB stack, there is no need to reset the whole range.
827 size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
828 // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
829 // so we do it only for C/C++.
830 if (kGoMode || size < common_flags()->clear_shadow_mmap_threshold) {
831 u64 *p = (u64*)MemToShadow(addr);
832 CHECK(IsShadowMem((uptr)p));
833 CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
834 // FIXME: may overwrite a part outside the region
835 for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
836 p[i++] = val;
837 for (uptr j = 1; j < kShadowCnt; j++)
838 p[i++] = 0;
839 }
840 } else {
841 // The region is big, reset only beginning and end.
842 const uptr kPageSize = GetPageSizeCached();
843 u64 *begin = (u64*)MemToShadow(addr);
844 u64 *end = begin + size / kShadowCell * kShadowCnt;
845 u64 *p = begin;
846 // Set at least the first kPageSize/2 bytes and continue up to a page boundary.
847 while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
848 *p++ = val;
849 for (uptr j = 1; j < kShadowCnt; j++)
850 *p++ = 0;
851 }
852 // Reset middle part.
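// Remapping with MmapFixedNoReserve returns the pages to a demand-zeroed
// state, so the bulk of the range is cleared without writing to it.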
853 u64 *p1 = p;
854 p = RoundDown(end, kPageSize);
855 UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
856 MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
857 // Set the ending.
858 while (p < end) {
859 *p++ = val;
860 for (uptr j = 1; j < kShadowCnt; j++)
861 *p++ = 0;
862 }
863 }
864 }
865
866 void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
867 MemoryRangeSet(thr, pc, addr, size, 0);
868 }
869
870 void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
871 // Processing more than 1k (4k of shadow) is expensive,
872 // can cause excessive memory consumption (the user does not necessarily touch
873 // the whole range) and is most likely unnecessary.
874 if (size > 1024)
875 size = 1024;
876 CHECK_EQ(thr->is_freeing, false);
877 thr->is_freeing = true;
878 MemoryAccessRange(thr, pc, addr, size, true);
879 thr->is_freeing = false;
880 if (kCollectHistory) {
881 thr->fast_state.IncrementEpoch();
882 TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
883 }
884 Shadow s(thr->fast_state);
885 s.ClearIgnoreBit();
886 s.MarkAsFreed();
887 s.SetWrite(true);
888 s.SetAddr0AndSizeLog(0, 3);
889 MemoryRangeSet(thr, pc, addr, size, s.raw());
890 }
891
892 void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
893 if (kCollectHistory) {
894 thr->fast_state.IncrementEpoch();
895 TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
896 }
897 Shadow s(thr->fast_state);
898 s.ClearIgnoreBit();
899 s.SetWrite(true);
900 s.SetAddr0AndSizeLog(0, 3);
901 MemoryRangeSet(thr, pc, addr, size, s.raw());
902 }
903
904 ALWAYS_INLINE USED
905 void FuncEntry(ThreadState *thr, uptr pc) {
906 StatInc(thr, StatFuncEnter);
907 DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
908 if (kCollectHistory) {
909 thr->fast_state.IncrementEpoch();
910 TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
911 }
912
913 // Shadow stack maintenance can be replaced with
914 // stack unwinding during trace switch (which presumably must be faster).
915 DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
916 #ifndef SANITIZER_GO
917 DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
918 #else
919 if (thr->shadow_stack_pos == thr->shadow_stack_end)
920 GrowShadowStack(thr);
921 #endif
922 thr->shadow_stack_pos[0] = pc;
923 thr->shadow_stack_pos++;
924 }
925
926 ALWAYS_INLINE USED
927 void FuncExit(ThreadState *thr) {
928 StatInc(thr, StatFuncExit);
929 DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
930 if (kCollectHistory) {
931 thr->fast_state.IncrementEpoch();
932 TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
933 }
934
935 DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
936 #ifndef SANITIZER_GO
937 DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
938 #endif
939 thr->shadow_stack_pos--;
940 }
941
942 void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
943 DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
944 thr->ignore_reads_and_writes++;
945 CHECK_GT(thr->ignore_reads_and_writes, 0);
946 thr->fast_state.SetIgnoreBit();
947 #ifndef SANITIZER_GO
948 if (!ctx->after_multithreaded_fork)
949 thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
950 #endif
951 }
952
953 void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
954 DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
955 thr->ignore_reads_and_writes--;
956 CHECK_GE(thr->ignore_reads_and_writes, 0);
957 if (thr->ignore_reads_and_writes == 0) {
958 thr->fast_state.ClearIgnoreBit();
959 #ifndef SANITIZER_GO
960 thr->mop_ignore_set.Reset();
961 #endif
962 }
963 }
964
965 void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
966 DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
967 thr->ignore_sync++;
968 CHECK_GT(thr->ignore_sync, 0);
969 #ifndef SANITIZER_GO
970 if (!ctx->after_multithreaded_fork)
971 thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
972 #endif
973 }
974
975 void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
976 DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
977 thr->ignore_sync--;
978 CHECK_GE(thr->ignore_sync, 0);
979 #ifndef SANITIZER_GO
980 if (thr->ignore_sync == 0)
981 thr->sync_ignore_set.Reset();
982 #endif
983 }
984
985 bool MD5Hash::operator==(const MD5Hash &other) const {
986 return hash[0] == other.hash[0] && hash[1] == other.hash[1];
987 }
988
989 #if SANITIZER_DEBUG
990 void build_consistency_debug() {}
991 #else
992 void build_consistency_release() {}
993 #endif
994
995 #if TSAN_COLLECT_STATS
996 void build_consistency_stats() {}
997 #else
998 void build_consistency_nostats() {}
999 #endif
1000
1001 } // namespace __tsan
1002
1003 #ifndef SANITIZER_GO
1004 // Must be included in this file to make sure everything is inlined.
1005 #include "tsan_interface_inl.h"
1006 #endif
1007