//===-- tsan_mman.cc ------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by front-end.
extern "C" void WEAK __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

extern "C" void WEAK __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
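
// A front-end can override the weak hooks above to observe heap activity.
// A minimal sketch (illustrative only, not part of this file):
//
//   extern "C" void __sanitizer_malloc_hook(void *ptr, uptr size) {
//     // Called after every user allocation with the resulting pointer/size.
//   }
//
//   extern "C" void __sanitizer_free_hook(void *ptr) {
//     // Called before every user deallocation.
//   }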

namespace __tsan {

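// Called by the allocator when it maps/unmaps memory for user heap blocks.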
struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
  }
};

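// The allocator is constructed lazily in this static buffer so that no
// global constructor needs to run at startup.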
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

void InitializeAllocator() {
  allocator()->Init(common_flags()->allocator_may_return_null);
}

void AllocatorThreadStart(ThreadState *thr) {
  allocator()->InitCache(&thr->alloc_cache);
  internal_allocator()->InitCache(&thr->internal_alloc_cache);
}

void AllocatorThreadFinish(ThreadState *thr) {
  allocator()->DestroyCache(&thr->alloc_cache);
  internal_allocator()->DestroyCache(&thr->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

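// If the current thread is inside a signal handler and the user asked for
// such reports, report a signal-unsafe call (malloc/free are not
// async-signal-safe).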
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load(&thr->in_signal_handler, memory_order_relaxed) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  if (!IsFiredSuppression(ctx, rep, stack)) {
    rep.AddStack(stack, true);
    OutputReport(thr, rep);
  }
}

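// Sizes and alignments of 2^40 bytes and above are rejected up front as
// almost certainly bogus.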
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return allocator()->ReturnNullOrDie();
  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (CallocShouldReturnNullDueToOverflow(size, n))
    return allocator()->ReturnNullOrDie();
  void *p = user_alloc(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return p;
}

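// The free event is recorded in the shadow (via OnUserFree) before the
// memory is handed back to the allocator.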
void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

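// Registers the block in the metadata map and initializes its shadow:
// a fresh allocation behaves like a write by the allocating thread
// (unless memory accesses are currently being ignored).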
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

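// Removes the block from the metadata map and marks the range as freed,
// so that later accesses to it are reported as use-after-free races.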
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr, pc, p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  void *p2 = 0;
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      uptr oldsz = user_alloc_usable_size(p);
      internal_memcpy(p2, p, min(oldsz, sz));
    }
  }
  if (p)
    user_free(thr, pc, p);
  return p2;
}

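// Returns the originally requested size of a live heap block,
// or 0 if the pointer is not a heap block known to TSan.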
uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  return b ? b->siz : 0;
}

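// The user hooks are skipped while the runtime is not yet initialized or
// the thread is inside an interceptor-ignored region.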
void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
}

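// The runtime's own data structures go through a separate internal
// allocator and must never be requested from a nomalloc context.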
void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->internal_alloc_cache);
}

} // namespace __tsan

using namespace __tsan;

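// Implementation of the public sanitizer allocator introspection API
// (declared in sanitizer_allocator_interface.h).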
extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

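// The next two are not implemented for TSan and return a dummy value.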
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

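// Releases this thread's allocator caches back to the shared pool;
// intended to be called when the thread is going to be idle for a while.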
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->alloc_cache);
  internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
  ctx->metamap.OnThreadIdle(thr);
}
} // extern "C"