xref: /freebsd/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp (revision c66ec88fed842fbaad62c30d510644ceb7bd2d71)
1 //=-- lsan_common.cpp -----------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of LeakSanitizer.
10 // Implementation of common leak checking functionality.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "lsan_common.h"
15 
16 #include "sanitizer_common/sanitizer_common.h"
17 #include "sanitizer_common/sanitizer_flag_parser.h"
18 #include "sanitizer_common/sanitizer_flags.h"
19 #include "sanitizer_common/sanitizer_placement_new.h"
20 #include "sanitizer_common/sanitizer_procmaps.h"
21 #include "sanitizer_common/sanitizer_report_decorator.h"
22 #include "sanitizer_common/sanitizer_stackdepot.h"
23 #include "sanitizer_common/sanitizer_stacktrace.h"
24 #include "sanitizer_common/sanitizer_suppressions.h"
25 #include "sanitizer_common/sanitizer_thread_registry.h"
26 #include "sanitizer_common/sanitizer_tls_get_addr.h"
27 
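// Name of the leak-check phase currently being executed; the ForEachChunk
// callbacks below update it as they run. Exported with C linkage, presumably
// as a debugging aid so the active phase is visible from a debugger or in
// crash reports.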
28 extern "C" const char *__lsan_current_stage = "unknown";
29 
30 #if CAN_SANITIZE_LEAKS
31 namespace __lsan {
32 
33 // This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
34 // also to protect the global list of root regions.
35 BlockingMutex global_mutex(LINKER_INITIALIZED);
36 
37 Flags lsan_flags;
38 
39 
40 void DisableCounterUnderflow() {
41   if (common_flags()->detect_leaks) {
42     Report("Unmatched call to __lsan_enable().\n");
43     Die();
44   }
45 }
46 
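// Flag definitions live in lsan_flags.inc as X-macro entries; each entry is
// expanded once here to set its default and once below to register it with
// the flag parser. An entry has roughly this shape (illustrative):
//   LSAN_FLAG(bool, use_stacks, true, "Root set: include thread stacks.")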
47 void Flags::SetDefaults() {
48 #define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
49 #include "lsan_flags.inc"
50 #undef LSAN_FLAG
51 }
52 
53 void RegisterLsanFlags(FlagParser *parser, Flags *f) {
54 #define LSAN_FLAG(Type, Name, DefaultValue, Description) \
55   RegisterFlag(parser, #Name, Description, &f->Name);
56 #include "lsan_flags.inc"
57 #undef LSAN_FLAG
58 }
59 
60 #define LOG_POINTERS(...)                           \
61   do {                                              \
62     if (flags()->log_pointers) Report(__VA_ARGS__); \
63   } while (0)
64 
65 #define LOG_THREADS(...)                           \
66   do {                                             \
67     if (flags()->log_threads) Report(__VA_ARGS__); \
68   } while (0)
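// Both macros are no-ops unless the corresponding flags are set, e.g. via
// LSAN_OPTIONS=log_pointers=1:log_threads=1.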
69 
70 ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
71 static SuppressionContext *suppression_ctx = nullptr;
72 static const char kSuppressionLeak[] = "leak";
73 static const char *kSuppressionTypes[] = { kSuppressionLeak };
74 static const char kStdSuppressions[] =
75 #if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
76   // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
77   // definition.
78   "leak:*pthread_exit*\n"
79 #endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
80 #if SANITIZER_MAC
81   // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
82   "leak:*_os_trace*\n"
83 #endif
84   // TLS leak in some glibc versions, described in
85   // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
86   "leak:*tls_get_addr*\n";
87 
88 void InitializeSuppressions() {
89   CHECK_EQ(nullptr, suppression_ctx);
90   suppression_ctx = new (suppression_placeholder)
91       SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
92   suppression_ctx->ParseFromFile(flags()->suppressions);
93   if (&__lsan_default_suppressions)
94     suppression_ctx->Parse(__lsan_default_suppressions());
95   suppression_ctx->Parse(kStdSuppressions);
96 }
97 
98 static SuppressionContext *GetSuppressionContext() {
99   CHECK(suppression_ctx);
100   return suppression_ctx;
101 }
102 
103 static InternalMmapVector<RootRegion> *root_regions;
104 
105 InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }
106 
107 void InitializeRootRegions() {
108   CHECK(!root_regions);
109   ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
110   root_regions = new (placeholder) InternalMmapVector<RootRegion>();
111 }
112 
113 const char *MaybeCallLsanDefaultOptions() {
114   return (&__lsan_default_options) ? __lsan_default_options() : "";
115 }
116 
117 void InitCommonLsan() {
118   InitializeRootRegions();
119   if (common_flags()->detect_leaks) {
120     // Initialization which can fail or print warnings should only be done if
121     // LSan is actually enabled.
122     InitializeSuppressions();
123     InitializePlatformSpecificModules();
124   }
125 }
126 
127 class Decorator: public __sanitizer::SanitizerCommonDecorator {
128  public:
129   Decorator() : SanitizerCommonDecorator() { }
130   const char *Error() { return Red(); }
131   const char *Leak() { return Blue(); }
132 };
133 
134 static inline bool CanBeAHeapPointer(uptr p) {
135   // Since our heap is located in mmap-ed memory, we can assume a sensible lower
136   // bound on heap addresses.
137   const uptr kMinAddress = 4 * 4096;
138   if (p < kMinAddress) return false;
139 #if defined(__x86_64__)
140   // Accept only canonical form user-space addresses.
141   return ((p >> 47) == 0);
142 #elif defined(__mips64)
143   return ((p >> 40) == 0);
144 #elif defined(__aarch64__)
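  // The effective virtual address width (39/42/48 bits, etc.) is only known
  // at run time, so derive it from a known-good address (the current frame)
  // and reject anything with bits set above that width.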
145   unsigned runtimeVMA =
146     (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
147   return ((p >> runtimeVMA) == 0);
148 #else
149   return true;
150 #endif
151 }
152 
153 // Scans the memory range, looking for byte patterns that point into allocator
154 // chunks. Marks those chunks with |tag| and adds them to |frontier|.
155 // There are two usage modes for this function: finding reachable chunks
156 // (|tag| = kReachable) and finding indirectly leaked chunks
157 // (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
158 // so |frontier| = 0.
159 void ScanRangeForPointers(uptr begin, uptr end,
160                           Frontier *frontier,
161                           const char *region_type, ChunkTag tag) {
162   CHECK(tag == kReachable || tag == kIndirectlyLeaked);
163   const uptr alignment = flags()->pointer_alignment();
164   LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
165   uptr pp = begin;
166   if (pp % alignment)
167     pp = pp + alignment - pp % alignment;
168   for (; pp + sizeof(void *) <= end; pp += alignment) {
169     void *p = *reinterpret_cast<void **>(pp);
170     if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
171     uptr chunk = PointsIntoChunk(p);
172     if (!chunk) continue;
173     // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
174     if (chunk == begin) continue;
175     LsanMetadata m(chunk);
176     if (m.tag() == kReachable || m.tag() == kIgnored) continue;
177 
178     // Do this check relatively late so we can log only the interesting cases.
179     if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
180       LOG_POINTERS(
181           "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
182           "%zu.\n",
183           pp, p, chunk, chunk + m.requested_size(), m.requested_size());
184       continue;
185     }
186 
187     m.set_tag(tag);
188     LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
189                  chunk, chunk + m.requested_size(), m.requested_size());
190     if (frontier)
191       frontier->push_back(chunk);
192   }
193 }
194 
195 // Scans a global range for pointers, skipping the allocator's own range.
196 void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
197   uptr allocator_begin = 0, allocator_end = 0;
198   GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
199   if (begin <= allocator_begin && allocator_begin < end) {
200     CHECK_LE(allocator_begin, allocator_end);
201     CHECK_LE(allocator_end, end);
202     if (begin < allocator_begin)
203       ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
204                            kReachable);
205     if (allocator_end < end)
206       ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
207   } else {
208     ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
209   }
210 }
211 
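// ForEachExtraStackRange() callback: scans additional per-thread stack ranges
// reported by the tool LSan runs on top of (e.g. ASan's fake stack frames).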
212 void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
213   Frontier *frontier = reinterpret_cast<Frontier *>(arg);
214   ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
215 }
216 
217 #if SANITIZER_FUCHSIA
218 
219 // Fuchsia handles all threads together with its own callback.
220 static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {}
221 
222 #else
223 
224 // Scans thread data (stacks and TLS) for heap pointers.
225 static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
226                            Frontier *frontier) {
227   InternalMmapVector<uptr> registers(suspended_threads.RegisterCount());
228   uptr registers_begin = reinterpret_cast<uptr>(registers.data());
229   uptr registers_end =
230       reinterpret_cast<uptr>(registers.data() + registers.size());
231   for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
232     tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
233     LOG_THREADS("Processing thread %d.\n", os_id);
234     uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
235     DTLS *dtls;
236     bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
237                                               &tls_begin, &tls_end,
238                                               &cache_begin, &cache_end, &dtls);
239     if (!thread_found) {
240       // If a thread can't be found in the thread registry, it's probably in the
241       // process of destruction. Log this event and move on.
242       LOG_THREADS("Thread %d not found in registry.\n", os_id);
243       continue;
244     }
245     uptr sp;
246     PtraceRegistersStatus have_registers =
247         suspended_threads.GetRegistersAndSP(i, registers.data(), &sp);
248     if (have_registers != REGISTERS_AVAILABLE) {
249       Report("Unable to get registers from thread %d.\n", os_id);
250       // If unable to get SP, consider the entire stack to be reachable unless
251       // GetRegistersAndSP failed with ESRCH.
252       if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
253       sp = stack_begin;
254     }
255 
256     if (flags()->use_registers && have_registers)
257       ScanRangeForPointers(registers_begin, registers_end, frontier,
258                            "REGISTERS", kReachable);
259 
260     if (flags()->use_stacks) {
261       LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
262       if (sp < stack_begin || sp >= stack_end) {
263         // SP is outside the recorded stack range (e.g. the thread is running a
264         // signal handler on an alternate stack, or swapcontext was used).
265         // Again, consider the entire stack range to be reachable.
266         LOG_THREADS("WARNING: stack pointer not in stack range.\n");
267         uptr page_size = GetPageSizeCached();
268         int skipped = 0;
269         while (stack_begin < stack_end &&
270                !IsAccessibleMemoryRange(stack_begin, 1)) {
271           skipped++;
272           stack_begin += page_size;
273         }
274         LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
275                     skipped, stack_begin, stack_end);
276       } else {
277         // Shrink the stack range to ignore out-of-scope values.
278         stack_begin = sp;
279       }
280       ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
281                            kReachable);
282       ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
283     }
284 
285     if (flags()->use_tls) {
286       if (tls_begin) {
287         LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
288         // If the TLS and cache ranges don't overlap, scan the full TLS range;
289         // otherwise, scan only the non-overlapping portions.
290         if (cache_begin == cache_end || tls_end < cache_begin ||
291             tls_begin > cache_end) {
292           ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
293         } else {
294           if (tls_begin < cache_begin)
295             ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
296                                  kReachable);
297           if (tls_end > cache_end)
298             ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
299                                  kReachable);
300         }
301       }
302       if (dtls && !DTLSInDestruction(dtls)) {
303         for (uptr j = 0; j < dtls->dtv_size; ++j) {
304           uptr dtls_beg = dtls->dtv[j].beg;
305           uptr dtls_end = dtls_beg + dtls->dtv[j].size;
306           if (dtls_beg < dtls_end) {
307             LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
308             ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
309                                  kReachable);
310           }
311         }
312       } else {
313         // We are handling a thread with DTLS under destruction. Log about
314         // We are handling a thread with DTLS under destruction. Log this and
315         // continue.
316       }
317     }
318   }
319 }
320 
321 #endif  // SANITIZER_FUCHSIA
322 
323 void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
324                     uptr region_begin, uptr region_end, bool is_readable) {
325   uptr intersection_begin = Max(root_region.begin, region_begin);
326   uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
327   if (intersection_begin >= intersection_end) return;
328   LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
329                root_region.begin, root_region.begin + root_region.size,
330                region_begin, region_end,
331                is_readable ? "readable" : "unreadable");
332   if (is_readable)
333     ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
334                          kReachable);
335 }
336 
337 static void ProcessRootRegion(Frontier *frontier,
338                               const RootRegion &root_region) {
339   MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
340   MemoryMappedSegment segment;
341   while (proc_maps.Next(&segment)) {
342     ScanRootRegion(frontier, root_region, segment.start, segment.end,
343                    segment.IsReadable());
344   }
345 }
346 
347 // Scans root regions for heap pointers.
348 static void ProcessRootRegions(Frontier *frontier) {
349   if (!flags()->use_root_regions) return;
350   CHECK(root_regions);
351   for (uptr i = 0; i < root_regions->size(); i++) {
352     ProcessRootRegion(frontier, (*root_regions)[i]);
353   }
354 }
355 
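// Worklist-based flood fill: pop a chunk off the frontier and scan its
// payload; ScanRangeForPointers() tags newly discovered chunks and pushes them
// back onto the frontier until no work remains.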
356 static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
357   while (frontier->size()) {
358     uptr next_chunk = frontier->back();
359     frontier->pop_back();
360     LsanMetadata m(next_chunk);
361     ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
362                          "HEAP", tag);
363   }
364 }
365 
366 // ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
367 // which are reachable from it as indirectly leaked.
368 static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
369   __lsan_current_stage = "MarkIndirectlyLeakedCb";
370   chunk = GetUserBegin(chunk);
371   LsanMetadata m(chunk);
372   if (m.allocated() && m.tag() != kReachable) {
373     ScanRangeForPointers(chunk, chunk + m.requested_size(),
374                          /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
375   }
376 }
377 
378 // ForEachChunk callback. If chunk is marked as ignored, adds its address to
379 // frontier.
380 static void CollectIgnoredCb(uptr chunk, void *arg) {
381   CHECK(arg);
382   __lsan_current_stage = "CollectIgnoredCb";
383   chunk = GetUserBegin(chunk);
384   LsanMetadata m(chunk);
385   if (m.allocated() && m.tag() == kIgnored) {
386     LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
387                  chunk, chunk + m.requested_size(), m.requested_size());
388     reinterpret_cast<Frontier *>(arg)->push_back(chunk);
389   }
390 }
391 
392 static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
393   CHECK(stack_id);
394   StackTrace stack = map->Get(stack_id);
395   // The top frame is our malloc/calloc/etc. The next frame is the caller.
396   if (stack.size >= 2)
397     return stack.trace[1];
398   return 0;
399 }
400 
401 struct InvalidPCParam {
402   Frontier *frontier;
403   StackDepotReverseMap *stack_depot_reverse_map;
404   bool skip_linker_allocations;
405 };
406 
407 // ForEachChunk callback. If the caller PC is invalid or is within the linker,
408 // marks the chunk as reachable. Invoked (via ForEachChunk) from ProcessPC().
409 static void MarkInvalidPCCb(uptr chunk, void *arg) {
410   CHECK(arg);
411   InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
412   __lsan_current_stage = "MarkInvalidPCCb";
413   chunk = GetUserBegin(chunk);
414   LsanMetadata m(chunk);
415   if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
416     u32 stack_id = m.stack_trace_id();
417     uptr caller_pc = 0;
418     if (stack_id > 0)
419       caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
420     // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
421     // it as reachable, as we can't properly report its allocation stack anyway.
422     if (caller_pc == 0 || (param->skip_linker_allocations &&
423                            GetLinker()->containsAddress(caller_pc))) {
424       m.set_tag(kReachable);
425       param->frontier->push_back(chunk);
426     }
427   }
428 }
429 
430 // On Linux, treats all chunks allocated from ld-linux.so as reachable, which
431 // covers dynamically allocated TLS blocks, the dynamic loader's internal
432 // bookkeeping for loaded modules, etc.
433 // Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
434 // They are allocated with a __libc_memalign() call in allocate_and_init()
435 // (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
436 // blocks, but we can make sure they come from our own allocator by intercepting
437 // __libc_memalign(). On top of that, there is no easy way to reach them. Their
438 // addresses are stored in a dynamically allocated array (the DTV) which is
439 // referenced from the static TLS. Unfortunately, we can't just rely on the DTV
440 // being reachable from the static TLS, and the dynamic TLS being reachable from
441 // the DTV. This is because the initial DTV is allocated before our interception
442 // mechanism kicks in, and thus we don't recognize it as allocated memory. We
443 // can't special-case it either, since we don't know its size.
444 // Our solution is to include in the root set all allocations made from
445 // ld-linux.so (which is where allocate_and_init() is implemented). This is
446 // guaranteed to include all dynamic TLS blocks (and possibly other allocations
447 // which we don't care about).
448 // On all other platforms, this simply checks to ensure that the caller pc is
449 // valid before reporting chunks as leaked.
450 void ProcessPC(Frontier *frontier) {
451   StackDepotReverseMap stack_depot_reverse_map;
452   InvalidPCParam arg;
453   arg.frontier = frontier;
454   arg.stack_depot_reverse_map = &stack_depot_reverse_map;
455   arg.skip_linker_allocations =
456       flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
457   ForEachChunk(MarkInvalidPCCb, &arg);
458 }
459 
460 // Sets the appropriate tag on each chunk.
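// The root set consists of ignored chunks, global regions, thread stacks,
// registers and TLS, and user-registered root regions. Everything reachable
// from it is flood-filled as kReachable; chunks reachable only from leaked
// chunks are then marked kIndirectlyLeaked.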
461 static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
462                               Frontier *frontier) {
463   ForEachChunk(CollectIgnoredCb, frontier);
464   ProcessGlobalRegions(frontier);
465   ProcessThreads(suspended_threads, frontier);
466   ProcessRootRegions(frontier);
467   FloodFillTag(frontier, kReachable);
468 
469   CHECK_EQ(0, frontier->size());
470   ProcessPC(frontier);
471 
472   // The check here is relatively expensive, so we do this in a separate flood
473   // fill. That way we can skip the check for chunks that are reachable
474   // otherwise.
475   LOG_POINTERS("Processing platform-specific allocations.\n");
476   ProcessPlatformSpecificAllocations(frontier);
477   FloodFillTag(frontier, kReachable);
478 
479   // Iterate over leaked chunks and mark those that are reachable from other
480   // leaked chunks.
481   LOG_POINTERS("Scanning leaked chunks.\n");
482   ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
483 }
484 
485 // ForEachChunk callback. Resets the tags to pre-leak-check state.
486 static void ResetTagsCb(uptr chunk, void *arg) {
487   (void)arg;
488   __lsan_current_stage = "ResetTagsCb";
489   chunk = GetUserBegin(chunk);
490   LsanMetadata m(chunk);
491   if (m.allocated() && m.tag() != kIgnored)
492     m.set_tag(kDirectlyLeaked);
493 }
494 
495 static void PrintStackTraceById(u32 stack_trace_id) {
496   CHECK(stack_trace_id);
497   StackDepotGet(stack_trace_id).Print();
498 }
499 
500 // ForEachChunk callback. Aggregates information about unreachable chunks into
501 // a LeakReport.
502 static void CollectLeaksCb(uptr chunk, void *arg) {
503   CHECK(arg);
504   LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
505   __lsan_current_stage = "CollectLeaksCb";
506   chunk = GetUserBegin(chunk);
507   LsanMetadata m(chunk);
508   if (!m.allocated()) return;
509   if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
510     u32 resolution = flags()->resolution;
511     u32 stack_trace_id = 0;
512     if (resolution > 0) {
513       StackTrace stack = StackDepotGet(m.stack_trace_id());
514       stack.size = Min(stack.size, resolution);
515       stack_trace_id = StackDepotPut(stack);
516     } else {
517       stack_trace_id = m.stack_trace_id();
518     }
519     leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
520                                 m.tag());
521   }
522 }
523 
524 static void PrintMatchedSuppressions() {
525   InternalMmapVector<Suppression *> matched;
526   GetSuppressionContext()->GetMatched(&matched);
527   if (!matched.size())
528     return;
529   const char *line = "-----------------------------------------------------";
530   Printf("%s\n", line);
531   Printf("Suppressions used:\n");
532   Printf("  count      bytes template\n");
533   for (uptr i = 0; i < matched.size(); i++)
534     Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
535         &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
536   Printf("%s\n\n", line);
537 }
538 
539 static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
540   const InternalMmapVector<tid_t> &suspended_threads =
541       *(const InternalMmapVector<tid_t> *)arg;
542   if (tctx->status == ThreadStatusRunning) {
543     uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(),
544                                 tctx->os_id, CompareLess<tid_t>());
545     if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
546       Report("Running thread %d was not suspended. False leaks are possible.\n",
547              tctx->os_id);
548   }
549 }
550 
551 #if SANITIZER_FUCHSIA
552 
553 // Fuchsia provides a libc interface that guarantees all threads are
554 // covered, and the SuspendedThreadsList argument is never really used.
555 static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}
556 
557 #else  // !SANITIZER_FUCHSIA
558 
559 static void ReportUnsuspendedThreads(
560     const SuspendedThreadsList &suspended_threads) {
561   InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
562   for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
563     threads[i] = suspended_threads.GetThreadID(i);
564 
565   Sort(threads.data(), threads.size());
566 
567   GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
568       &ReportIfNotSuspended, &threads);
569 }
570 
571 #endif  // !SANITIZER_FUCHSIA
572 
573 static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
574                                   void *arg) {
575   CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
576   CHECK(param);
577   CHECK(!param->success);
578   ReportUnsuspendedThreads(suspended_threads);
579   ClassifyAllChunks(suspended_threads, &param->frontier);
580   ForEachChunk(CollectLeaksCb, &param->leak_report);
581   // Clean up for subsequent leak checks. This assumes we did not overwrite any
582   // kIgnored tags.
583   ForEachChunk(ResetTagsCb, nullptr);
584   param->success = true;
585 }
586 
587 static bool CheckForLeaks() {
588   if (&__lsan_is_turned_off && __lsan_is_turned_off())
589       return false;
590   EnsureMainThreadIDIsCorrect();
591   CheckForLeaksParam param;
592   LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
593 
594   if (!param.success) {
595     Report("LeakSanitizer has encountered a fatal error.\n");
596     Report(
597         "HINT: For debugging, try setting environment variable "
598         "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
599     Report(
600         "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
601     Die();
602   }
603   param.leak_report.ApplySuppressions();
604   uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
605   if (unsuppressed_count > 0) {
606     Decorator d;
607     Printf("\n"
608            "================================================================="
609            "\n");
610     Printf("%s", d.Error());
611     Report("ERROR: LeakSanitizer: detected memory leaks\n");
612     Printf("%s", d.Default());
613     param.leak_report.ReportTopLeaks(flags()->max_leaks);
614   }
615   if (common_flags()->print_suppressions)
616     PrintMatchedSuppressions();
617   if (unsuppressed_count > 0) {
618     param.leak_report.PrintSummary();
619     return true;
620   }
621   return false;
622 }
623 
624 static bool has_reported_leaks = false;
625 bool HasReportedLeaks() { return has_reported_leaks; }
626 
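// Runs the full check at most once per process; if leaks were reported, hands
// off to HandleLeaks(), which may terminate the process. The recoverable
// variant below can be called repeatedly and only returns whether leaks were
// found.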
627 void DoLeakCheck() {
628   BlockingMutexLock l(&global_mutex);
629   static bool already_done;
630   if (already_done) return;
631   already_done = true;
632   has_reported_leaks = CheckForLeaks();
633   if (has_reported_leaks) HandleLeaks();
634 }
635 
636 static int DoRecoverableLeakCheck() {
637   BlockingMutexLock l(&global_mutex);
638   bool have_leaks = CheckForLeaks();
639   return have_leaks ? 1 : 0;
640 }
641 
642 void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
643 
644 static Suppression *GetSuppressionForAddr(uptr addr) {
645   Suppression *s = nullptr;
646 
647   // Suppress by module name.
648   SuppressionContext *suppressions = GetSuppressionContext();
649   if (const char *module_name =
650           Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
651     if (suppressions->Match(module_name, kSuppressionLeak, &s))
652       return s;
653 
654   // Suppress by file or function name.
655   SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
656   for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
657     if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
658         suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
659       break;
660     }
661   }
662   frames->ClearAll();
663   return s;
664 }
665 
666 static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
667   StackTrace stack = StackDepotGet(stack_trace_id);
668   for (uptr i = 0; i < stack.size; i++) {
669     Suppression *s = GetSuppressionForAddr(
670         StackTrace::GetPreviousInstructionPc(stack.trace[i]));
671     if (s) return s;
672   }
673   return nullptr;
674 }
675 
676 ///// LeakReport implementation. /////
677 
678 // A hard limit on the number of distinct leaks, to avoid quadratic complexity
679 // in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
680 // in real-world applications.
681 // FIXME: Get rid of this limit by changing the implementation of LeakReport to
682 // use a hash table.
683 const uptr kMaxLeaksConsidered = 5000;
684 
685 void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
686                                 uptr leaked_size, ChunkTag tag) {
687   CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
688   bool is_directly_leaked = (tag == kDirectlyLeaked);
689   uptr i;
690   for (i = 0; i < leaks_.size(); i++) {
691     if (leaks_[i].stack_trace_id == stack_trace_id &&
692         leaks_[i].is_directly_leaked == is_directly_leaked) {
693       leaks_[i].hit_count++;
694       leaks_[i].total_size += leaked_size;
695       break;
696     }
697   }
698   if (i == leaks_.size()) {
699     if (leaks_.size() == kMaxLeaksConsidered) return;
700     Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
701                   is_directly_leaked, /* is_suppressed */ false };
702     leaks_.push_back(leak);
703   }
704   if (flags()->report_objects) {
705     LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
706     leaked_objects_.push_back(obj);
707   }
708 }
709 
710 static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
711   if (leak1.is_directly_leaked == leak2.is_directly_leaked)
712     return leak1.total_size > leak2.total_size;
713   else
714     return leak1.is_directly_leaked;
715 }
716 
717 void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
718   CHECK(leaks_.size() <= kMaxLeaksConsidered);
719   Printf("\n");
720   if (leaks_.size() == kMaxLeaksConsidered)
721     Printf("Too many leaks! Only the first %zu leaks encountered will be "
722            "reported.\n",
723            kMaxLeaksConsidered);
724 
725   uptr unsuppressed_count = UnsuppressedLeakCount();
726   if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
727     Printf("The %zu top leak(s):\n", num_leaks_to_report);
728   Sort(leaks_.data(), leaks_.size(), &LeakComparator);
729   uptr leaks_reported = 0;
730   for (uptr i = 0; i < leaks_.size(); i++) {
731     if (leaks_[i].is_suppressed) continue;
732     PrintReportForLeak(i);
733     leaks_reported++;
734     if (leaks_reported == num_leaks_to_report) break;
735   }
736   if (leaks_reported < unsuppressed_count) {
737     uptr remaining = unsuppressed_count - leaks_reported;
738     Printf("Omitting %zu more leak(s).\n", remaining);
739   }
740 }
741 
742 void LeakReport::PrintReportForLeak(uptr index) {
743   Decorator d;
744   Printf("%s", d.Leak());
745   Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
746          leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
747          leaks_[index].total_size, leaks_[index].hit_count);
748   Printf("%s", d.Default());
749 
750   PrintStackTraceById(leaks_[index].stack_trace_id);
751 
752   if (flags()->report_objects) {
753     Printf("Objects leaked above:\n");
754     PrintLeakedObjectsForLeak(index);
755     Printf("\n");
756   }
757 }
758 
759 void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
760   u32 leak_id = leaks_[index].id;
761   for (uptr j = 0; j < leaked_objects_.size(); j++) {
762     if (leaked_objects_[j].leak_id == leak_id)
763       Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
764              leaked_objects_[j].size);
765   }
766 }
767 
768 void LeakReport::PrintSummary() {
769   CHECK(leaks_.size() <= kMaxLeaksConsidered);
770   uptr bytes = 0, allocations = 0;
771   for (uptr i = 0; i < leaks_.size(); i++) {
772       if (leaks_[i].is_suppressed) continue;
773       bytes += leaks_[i].total_size;
774       allocations += leaks_[i].hit_count;
775   }
776   InternalScopedString summary(kMaxSummaryLength);
777   summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
778                  allocations);
779   ReportErrorSummary(summary.data());
780 }
781 
782 void LeakReport::ApplySuppressions() {
783   for (uptr i = 0; i < leaks_.size(); i++) {
784     Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
785     if (s) {
786       s->weight += leaks_[i].total_size;
787       atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
788           leaks_[i].hit_count);
789       leaks_[i].is_suppressed = true;
790     }
791   }
792 }
793 
794 uptr LeakReport::UnsuppressedLeakCount() {
795   uptr result = 0;
796   for (uptr i = 0; i < leaks_.size(); i++)
797     if (!leaks_[i].is_suppressed) result++;
798   return result;
799 }
800 
801 } // namespace __lsan
802 #else // CAN_SANITIZE_LEAKS
803 namespace __lsan {
804 void InitCommonLsan() { }
805 void DoLeakCheck() { }
806 void DoRecoverableLeakCheckVoid() { }
807 void DisableInThisThread() { }
808 void EnableInThisThread() { }
809 }
810 #endif // CAN_SANITIZE_LEAKS
811 
812 using namespace __lsan;
813 
814 extern "C" {
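// Public entry points of the LeakSanitizer interface; user-facing declarations
// live in <sanitizer/lsan_interface.h>.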
815 SANITIZER_INTERFACE_ATTRIBUTE
816 void __lsan_ignore_object(const void *p) {
817 #if CAN_SANITIZE_LEAKS
818   if (!common_flags()->detect_leaks)
819     return;
820   // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
821   // locked.
822   BlockingMutexLock l(&global_mutex);
823   IgnoreObjectResult res = IgnoreObjectLocked(p);
824   if (res == kIgnoreObjectInvalid)
825     VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
826   if (res == kIgnoreObjectAlreadyIgnored)
827     VReport(1, "__lsan_ignore_object(): "
828            "heap object at %p is already being ignored\n", p);
829   if (res == kIgnoreObjectSuccess)
830     VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
831 #endif // CAN_SANITIZE_LEAKS
832 }
833 
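// Illustrative use from application code (names below are hypothetical): pin
// a custom arena so that pointers stored inside it keep their targets alive:
//   __lsan_register_root_region(arena_base, arena_size);
//   ...
//   __lsan_unregister_root_region(arena_base, arena_size);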
834 SANITIZER_INTERFACE_ATTRIBUTE
835 void __lsan_register_root_region(const void *begin, uptr size) {
836 #if CAN_SANITIZE_LEAKS
837   BlockingMutexLock l(&global_mutex);
838   CHECK(root_regions);
839   RootRegion region = {reinterpret_cast<uptr>(begin), size};
840   root_regions->push_back(region);
841   VReport(1, "Registered root region at %p of size %zu\n", begin, size);
842 #endif // CAN_SANITIZE_LEAKS
843 }
844 
845 SANITIZER_INTERFACE_ATTRIBUTE
846 void __lsan_unregister_root_region(const void *begin, uptr size) {
847 #if CAN_SANITIZE_LEAKS
848   BlockingMutexLock l(&global_mutex);
849   CHECK(root_regions);
850   bool removed = false;
851   for (uptr i = 0; i < root_regions->size(); i++) {
852     RootRegion region = (*root_regions)[i];
853     if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
854       removed = true;
855       uptr last_index = root_regions->size() - 1;
856       (*root_regions)[i] = (*root_regions)[last_index];
857       root_regions->pop_back();
858       VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
859       break;
860     }
861   }
862   if (!removed) {
863     Report(
864         "__lsan_unregister_root_region(): region at %p of size %zu has not "
865         "been registered.\n",
866         begin, size);
867     Die();
868   }
869 #endif // CAN_SANITIZE_LEAKS
870 }
871 
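// Allocations made between __lsan_disable() and __lsan_enable() are treated
// as ignored; the calls must be balanced (see DisableCounterUnderflow()
// above). Hypothetical example:
//   __lsan_disable();
//   leak_some_singletons();  // allocations made here are not reported
//   __lsan_enable();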
872 SANITIZER_INTERFACE_ATTRIBUTE
873 void __lsan_disable() {
874 #if CAN_SANITIZE_LEAKS
875   __lsan::DisableInThisThread();
876 #endif
877 }
878 
879 SANITIZER_INTERFACE_ATTRIBUTE
880 void __lsan_enable() {
881 #if CAN_SANITIZE_LEAKS
882   __lsan::EnableInThisThread();
883 #endif
884 }
885 
886 SANITIZER_INTERFACE_ATTRIBUTE
887 void __lsan_do_leak_check() {
888 #if CAN_SANITIZE_LEAKS
889   if (common_flags()->detect_leaks)
890     __lsan::DoLeakCheck();
891 #endif // CAN_SANITIZE_LEAKS
892 }
893 
894 SANITIZER_INTERFACE_ATTRIBUTE
895 int __lsan_do_recoverable_leak_check() {
896 #if CAN_SANITIZE_LEAKS
897   if (common_flags()->detect_leaks)
898     return __lsan::DoRecoverableLeakCheck();
899 #endif // CAN_SANITIZE_LEAKS
900   return 0;
901 }
902 
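// Fallback definitions used when the platform does not support the weak-hook
// mechanism; they make the &__lsan_default_options-style checks above always
// succeed and simply return empty defaults.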
903 #if !SANITIZER_SUPPORTS_WEAK_HOOKS
904 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
905 const char * __lsan_default_options() {
906   return "";
907 }
908 
909 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
910 int __lsan_is_turned_off() {
911   return 0;
912 }
913 
914 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
915 const char *__lsan_default_suppressions() {
916   return "";
917 }
918 #endif
919 } // extern "C"
920