//=-- lsan_common.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject,
// and also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }

Flags lsan_flags;

void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0)

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0)

ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder) // NOLINT
      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
  suppression_ctx->ParseFromFile(flags()->suppressions);
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
}

static SuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

struct RootRegion {
  const void *begin;
  uptr size;
};

InternalMmapVector<RootRegion> *root_regions;

void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new (placeholder) InternalMmapVector<RootRegion>(1);
}

void InitCommonLsan() {
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
  const char *End() { return Default(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible
  // lower bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#if defined(__x86_64__)
  // Accept only canonical-form user-space addresses.
  return ((p >> 47) == 0);
#elif defined(__mips64)
  return ((p >> 40) == 0);
#else
  return true;
#endif
}

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable or ignored
// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  // Round the starting address up to the pointer alignment.
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    // Reachable beats ignored beats leaked.
    if (m.tag() == kReachable) continue;
    if (m.tag() == kIgnored && tag != kReachable) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

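// ForEachExtraStackRange callback. Scans the given extra stack range (e.g. an
// ASan fake stack) for heap pointers.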
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %zu.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in
      // the process of destruction. Log this event and move on.
      LOG_THREADS("Thread %zu not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %zu.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack). Again, consider the entire stack
        // range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that the allocator cache will be part of the static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}

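// Intersects a single root region with the process memory map and scans the
// readable parts of the intersection for heap pointers.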
static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
                              uptr root_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr begin, end, prot;
  while (proc_maps.Next(&begin, &end,
                        /*offset*/ 0, /*filename*/ 0, /*filename_size*/ 0,
                        &prot)) {
    uptr intersection_begin = Max(root_begin, begin);
    uptr intersection_end = Min(end, root_end);
    if (intersection_begin >= intersection_end) continue;
    bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
    LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
                 root_begin, root_end, begin, end,
                 is_readable ? "readable" : "unreadable");
    if (is_readable)
      ScanRangeForPointers(intersection_begin, intersection_end, frontier,
                           "ROOT", kReachable);
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    uptr begin_addr = reinterpret_cast<uptr>(region.begin);
    ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
  }
}

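// Worklist-based flood fill: repeatedly pops a chunk off |frontier| and scans
// its payload, so that everything transitively reachable gets tagged |tag|.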
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If the chunk is marked as ignored, adds its address
// to the frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored)
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(1);

  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);
  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  LOG_POINTERS("Scanning ignored chunks.\n");
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectIgnoredCb, &frontier);
  FloodFillTag(&frontier, kIgnored);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  StackDepotGet(stack_trace_id).Print();
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    u32 resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      // Truncate the stack trace to |resolution| frames, so that leaks with
      // the same truncated trace are merged.
      StackTrace stack = StackDepotGet(m.stack_trace_id());
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  GetSuppressionContext()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count),
           matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

struct DoLeakCheckParam {
  bool success;
  LeakReport leak_report;
};

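// Stop-the-world callback: runs with all threads suspended, classifies every
// chunk, and collects the unreachable ones into the leak report.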
static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  param->success = true;
}

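// Performs the leak check. Runs at most once per process; subsequent calls
// return immediately.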
void DoLeakCheck() {
  EnsureMainThreadIDIsCorrect();
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return;

  DoLeakCheckParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  DoStopTheWorld(DoLeakCheckCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }
  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.End());
    param.leak_report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    if (flags()->exitcode) {
      if (common_flags()->coverage)
        __sanitizer_cov_dump();
      internal__exit(flags()->exitcode);
    }
  }
}

static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  SuppressionContext *suppressions = GetSuppressionContext();
  if (const char *module_name =
          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
    if (suppressions->Match(module_name, kSuppressionLeak, &s))
      return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
        suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return nullptr;
}


///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  // Linear search for an existing leak with the same allocation stack.
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}

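// Sorts leaks for reporting: direct leaks come before indirect ones, and
// within each class, leaks with a larger total size come first.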
static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  InternalSort(&leaks_, leaks_.size(), LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.End());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary(kMaxSummaryLength);
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}

void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      s->hit_count += leaks_[i].hit_count;
      leaks_[i].is_suppressed = true;
    }
  }
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is
  // not locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
               "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}
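// A minimal usage sketch (client code; the variable name is illustrative):
//   void *leaky = malloc(16);
//   __lsan_ignore_object(leaky);  // |leaky| will not be reported as a leak.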

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {begin, size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == begin && region.size == size) {
      removed = true;
      // Swap with the last element and pop, removing the region in O(1).
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %zu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif  // CAN_SANITIZE_LEAKS
}
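// Usage sketch for the root region interface (the buffer name is
// illustrative):
//   static char arena[1 << 20];
//   __lsan_register_root_region(arena, sizeof(arena));
//   // ... pointers stored in |arena| keep their targets reachable ...
//   __lsan_unregister_root_region(arena, sizeof(arena));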

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::disable_counter++;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  if (!__lsan::disable_counter && common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::disable_counter--;
#endif
}
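// Disable/enable calls nest and must be balanced; the __lsan::ScopedDisabler
// RAII helper from <sanitizer/lsan_interface.h> wraps this pattern for C++
// callers. Shown directly:
//   __lsan_disable();
//   void *intentionally_leaked = malloc(32);  // allocations made while
//                                             // disabled are not reported
//   __lsan_enable();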

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}
#endif
}  // extern "C"