//=-- lsan_common.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject,
// and also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

Flags lsan_flags;

void DisableCounterUnderflow() {
  if (common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
}

void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0)

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0)
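
// Both logs are off by default; they are enabled at runtime via the
// corresponding LSan flags, e.g. LSAN_OPTIONS=log_pointers=1:log_threads=1
// (CheckForLeaks() below prints a similar LSAN_OPTIONS hint on fatal errors).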

ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
static const char kStdSuppressions[] =
#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // definition.
    "leak:*pthread_exit*\n"
#endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
#if SANITIZER_MAC
    // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
    "leak:*_os_trace*\n"
#endif
    // TLS leak in some glibc versions, described in
    // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
    "leak:*tls_get_addr*\n";

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder)
      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
  suppression_ctx->ParseFromFile(flags()->suppressions);
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
  suppression_ctx->Parse(kStdSuppressions);
}

static SuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

static InternalMmapVector<RootRegion> *root_regions;

InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }

void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new (placeholder) InternalMmapVector<RootRegion>();
}

void InitCommonLsan() {
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible
  // lower bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#elif defined(__mips64)
  return ((p >> 40) == 0);
#elif defined(__aarch64__)
  unsigned runtimeVMA =
      (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
  return ((p >> runtimeVMA) == 0);
#else
  return true;
#endif
}
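
// For example, on x86_64 a user-space pointer such as 0x00007f1234567890
// satisfies (p >> 47) == 0 and is accepted, while a kernel-range value such
// as 0xffff800000000000 shifts to a non-zero value and is rejected.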

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  // Round the scan cursor up to the next |alignment| boundary.
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

// Scans a global range for pointers.
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  uptr allocator_begin = 0, allocator_end = 0;
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
  if (begin <= allocator_begin && allocator_begin < end) {
    CHECK_LE(allocator_begin, allocator_end);
    CHECK_LE(allocator_end, end);
    if (begin < allocator_begin)
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
  }
}

void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

#if SANITIZER_FUCHSIA

// Fuchsia handles all threads together with its own callback.
static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {}

#else

#if SANITIZER_ANDROID
// FIXME: Move this out into *libcdep.cpp
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
    pid_t, void (*cb)(void *, void *, uptr, void *), void *);
#endif

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalMmapVector<uptr> registers;
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in
      // the process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, &registers, &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers) {
      uptr registers_begin = reinterpret_cast<uptr>(registers.data());
      uptr registers_end =
          reinterpret_cast<uptr>(registers.data() + registers.size());
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);
    }

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, stack_begin, stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      if (tls_begin) {
        LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
        // If the tls and cache ranges don't overlap, scan the full tls range;
        // otherwise, scan only the non-overlapping portions.
        if (cache_begin == cache_end || tls_end < cache_begin ||
            tls_begin > cache_end) {
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
        }
      }
#if SANITIZER_ANDROID
      auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/,
                     void *arg) -> void {
        ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
                             reinterpret_cast<uptr>(dtls_end),
                             reinterpret_cast<Frontier *>(arg), "DTLS",
                             kReachable);
      };

      // FIXME: There might be a race-condition here (and in Bionic) if the
      // thread is suspended in the middle of updating its DTLS. IOWs, we
      // could scan already freed memory. (probably fine for now)
      __libc_iterate_dynamic_tls(os_id, cb, frontier);
#else
      if (dtls && !DTLSInDestruction(dtls)) {
        ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
          uptr dtls_beg = dtv.beg;
          uptr dtls_end = dtls_beg + dtv.size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %zu at %p-%p.\n", id, dtls_beg, dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        });
      } else {
        // We are handling a thread with DTLS under destruction. Log about
        // this and continue.
        LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id);
      }
#endif
    }
  }
}

#endif // SANITIZER_FUCHSIA

void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
                    uptr region_begin, uptr region_end, bool is_readable) {
  uptr intersection_begin = Max(root_region.begin, region_begin);
  uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
  if (intersection_begin >= intersection_end) return;
  LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
               root_region.begin, root_region.begin + root_region.size,
               region_begin, region_end,
               is_readable ? "readable" : "unreadable");
  if (is_readable)
    ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
                         kReachable);
}

static void ProcessRootRegion(Frontier *frontier,
                              const RootRegion &root_region) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    ScanRootRegion(frontier, root_region, segment.start, segment.end,
                   segment.IsReadable());
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    ProcessRootRegion(frontier, (*root_regions)[i]);
  }
}

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
                 chunk, chunk + m.requested_size(), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
  CHECK(stack_id);
  StackTrace stack = map->Get(stack_id);
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

struct InvalidPCParam {
  Frontier *frontier;
  StackDepotReverseMap *stack_depot_reverse_map;
  bool skip_linker_allocations;
};

// ForEachChunk callback. If the caller pc is invalid or is within the linker,
// mark as reachable. Called by ProcessPlatformSpecificAllocations.
static void MarkInvalidPCCb(uptr chunk, void *arg) {
  CHECK(arg);
  InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
    u32 stack_id = m.stack_trace_id();
    uptr caller_pc = 0;
    if (stack_id > 0)
      caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
    // If caller_pc is unknown, this chunk may be allocated in a coroutine.
    // Mark it as reachable, as we can't properly report its allocation stack
    // anyway.
    if (caller_pc == 0 || (param->skip_linker_allocations &&
                           GetLinker()->containsAddress(caller_pc))) {
      m.set_tag(kReachable);
      param->frontier->push_back(chunk);
    }
  }
}

// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
// modules accounting etc.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by
// intercepting __libc_memalign(). On top of that, there is no easy way to
// reach them. Their addresses are stored in a dynamically allocated array
// (the DTV) which is referenced from the static TLS. Unfortunately, we can't
// just rely on the DTV being reachable from the static TLS, and the dynamic
// TLS being reachable from the DTV. This is because the initial DTV is
// allocated before our interception mechanism kicks in, and thus we don't
// recognize it as allocated memory. We can't special-case it either, since we
// don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
void ProcessPC(Frontier *frontier) {
  StackDepotReverseMap stack_depot_reverse_map;
  InvalidPCParam arg;
  arg.frontier = frontier;
  arg.stack_depot_reverse_map = &stack_depot_reverse_map;
  arg.skip_linker_allocations =
      flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
  ForEachChunk(MarkInvalidPCCb, &arg);
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
                              Frontier *frontier) {
  ForEachChunk(CollectIgnoredCb, frontier);
  ProcessGlobalRegions(frontier);
  ProcessThreads(suspended_threads, frontier);
  ProcessRootRegions(frontier);
  FloodFillTag(frontier, kReachable);

  CHECK_EQ(0, frontier->size());
  ProcessPC(frontier);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(frontier);
  FloodFillTag(frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  StackDepotGet(stack_trace_id).Print();
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    u32 resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      StackTrace stack = StackDepotGet(m.stack_trace_id());
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched;
  GetSuppressionContext()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
        &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
  const InternalMmapVector<tid_t> &suspended_threads =
      *(const InternalMmapVector<tid_t> *)arg;
  if (tctx->status == ThreadStatusRunning) {
    uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(),
                                tctx->os_id, CompareLess<int>());
    if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
      Report("Running thread %d was not suspended. False leaks are possible.\n",
             tctx->os_id);
  }
}

#if SANITIZER_FUCHSIA

// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadList is never really used.
static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}

#else // !SANITIZER_FUCHSIA

static void ReportUnsuspendedThreads(
    const SuspendedThreadsList &suspended_threads) {
  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
    threads[i] = suspended_threads.GetThreadID(i);

  Sort(threads.data(), threads.size());

  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      &ReportIfNotSuspended, &threads);
}

#endif // !SANITIZER_FUCHSIA

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ReportUnsuspendedThreads(suspended_threads);
  ClassifyAllChunks(suspended_threads, &param->frontier);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}

static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return false;
  EnsureMainThreadIDIsCorrect();
  CheckForLeaksParam param;
  LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Report(
        "HINT: For debugging, try setting environment variable "
        "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
    Report(
        "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
    Die();
  }
  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.Default());
    param.leak_report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    return true;
  }
  return false;
}

static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  has_reported_leaks = CheckForLeaks();
  if (has_reported_leaks) HandleLeaks();
}
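
// Note: DoLeakCheck() is latched and runs at most once per process; repeated
// calls return immediately. DoRecoverableLeakCheck() below has no such latch
// and may be invoked multiple times.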

static int DoRecoverableLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }

static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  SuppressionContext *suppressions = GetSuppressionContext();
  if (const char *module_name =
          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
    if (suppressions->Match(module_name, kSuppressionLeak, &s))
      return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
        suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return nullptr;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.Default());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary(kMaxSummaryLength);
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}

void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
                                              leaks_[i].hit_count);
      leaks_[i].is_suppressed = true;
    }
  }
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

} // namespace __lsan
#else // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() { }
void DoLeakCheck() { }
void DoRecoverableLeakCheckVoid() { }
void DisableInThisThread() { }
void EnableInThisThread() { }
}
#endif // CAN_SANITIZE_LEAKS

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is
  // not locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
               "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif // CAN_SANITIZE_LEAKS
}
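
// Typical usage from user code (the interface is declared in
// <sanitizer/lsan_interface.h>):
//   void *p = malloc(16);
//   __lsan_ignore_object(p);  // p is excluded from leak reports.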

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {reinterpret_cast<uptr>(begin), size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %llu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif // CAN_SANITIZE_LEAKS
}
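
// Note that unregistration requires the exact (begin, size) pair that was
// registered. A sketch of the intended pairing (hypothetical buffer):
//   static char g_buf[4096];
//   __lsan_register_root_region(g_buf, sizeof(g_buf));
//   ...                                    // g_buf is scanned as a root.
//   __lsan_unregister_root_region(g_buf, sizeof(g_buf));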

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif // CAN_SANITIZE_LEAKS
  return 0;
}

SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
  return "";
}
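
// This is a weak hook; users may define their own __lsan_default_options()
// to bake flags into the binary, e.g. (flag values are illustrative):
//   extern "C" const char *__lsan_default_options() {
//     return "max_leaks=10:print_suppressions=0";
//   }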

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions() {
  return "";
}
#endif
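
// Like __lsan_default_options(), __lsan_default_suppressions() is a weak
// hook: user code may return extra suppression rules in the same "leak:"
// format parsed above, e.g. (hypothetical library name):
//   extern "C" const char *__lsan_default_suppressions() {
//     return "leak:libfoo.so\n";
//   }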
} // extern "C"