xref: /freebsd/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp (revision 700637cbb5e582861067a11aaca4d053546871d2)
1 //=-- lsan_common.cpp -----------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of LeakSanitizer.
10 // Implementation of common leak checking functionality.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "lsan_common.h"
15 
16 #include "sanitizer_common/sanitizer_common.h"
17 #include "sanitizer_common/sanitizer_flag_parser.h"
18 #include "sanitizer_common/sanitizer_flags.h"
19 #include "sanitizer_common/sanitizer_placement_new.h"
20 #include "sanitizer_common/sanitizer_procmaps.h"
21 #include "sanitizer_common/sanitizer_report_decorator.h"
22 #include "sanitizer_common/sanitizer_stackdepot.h"
23 #include "sanitizer_common/sanitizer_stacktrace.h"
24 #include "sanitizer_common/sanitizer_suppressions.h"
25 #include "sanitizer_common/sanitizer_thread_registry.h"
26 #include "sanitizer_common/sanitizer_tls_get_addr.h"
27 
28 #if CAN_SANITIZE_LEAKS
29 
30 #  if SANITIZER_APPLE
31 // https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L127
32 #    if SANITIZER_IOS && !SANITIZER_IOSSIM
33 #      define OBJC_DATA_MASK 0x0000007ffffffff8UL
34 #    else
35 #      define OBJC_DATA_MASK 0x00007ffffffffff8UL
36 #    endif
37 #  endif
38 
39 namespace __lsan {
40 
41 // This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
42 // also to protect the global list of root regions.
43 static Mutex global_mutex;
44 
45 void LockGlobal() SANITIZER_ACQUIRE(global_mutex) { global_mutex.Lock(); }
46 void UnlockGlobal() SANITIZER_RELEASE(global_mutex) { global_mutex.Unlock(); }
47 
48 Flags lsan_flags;
49 
50 void DisableCounterUnderflow() {
51   if (common_flags()->detect_leaks) {
52     Report("Unmatched call to __lsan_enable().\n");
53     Die();
54   }
55 }
56 
57 void Flags::SetDefaults() {
58 #  define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
59 #  include "lsan_flags.inc"
60 #  undef LSAN_FLAG
61 }
62 
63 void RegisterLsanFlags(FlagParser *parser, Flags *f) {
64 #  define LSAN_FLAG(Type, Name, DefaultValue, Description) \
65     RegisterFlag(parser, #Name, Description, &f->Name);
66 #  include "lsan_flags.inc"
67 #  undef LSAN_FLAG
68 }
69 
70 #  define LOG_POINTERS(...)      \
71     do {                         \
72       if (flags()->log_pointers) \
73         Report(__VA_ARGS__);     \
74     } while (0)
75 
76 #  define LOG_THREADS(...)      \
77     do {                        \
78       if (flags()->log_threads) \
79         Report(__VA_ARGS__);    \
80     } while (0)
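// Editor's note: log_pointers and log_threads are LSan flags (registered above
// from lsan_flags.inc), so the diagnostics emitted through these macros can be
// enabled at run time, e.g. with standalone LSan:
//   LSAN_OPTIONS=log_pointers=1:log_threads=1 ./a.out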
81 
82 class LeakSuppressionContext {
83   bool parsed = false;
84   SuppressionContext context;
85   bool suppressed_stacks_sorted = true;
86   InternalMmapVector<u32> suppressed_stacks;
87   const LoadedModule *suppress_module = nullptr;
88 
89   void LazyInit();
90   Suppression *GetSuppressionForAddr(uptr addr);
91   bool SuppressInvalid(const StackTrace &stack);
92   bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);
93 
94  public:
95   LeakSuppressionContext(const char *suppression_types[],
96                          int suppression_types_num)
97       : context(suppression_types, suppression_types_num) {}
98 
99   bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);
100 
101   const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
102     if (!suppressed_stacks_sorted) {
103       suppressed_stacks_sorted = true;
104       SortAndDedup(suppressed_stacks);
105     }
106     return suppressed_stacks;
107   }
108   void PrintMatchedSuppressions();
109 };
110 
111 alignas(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
112 static LeakSuppressionContext *suppression_ctx = nullptr;
113 static const char kSuppressionLeak[] = "leak";
114 static const char *kSuppressionTypes[] = {kSuppressionLeak};
115 static const char kStdSuppressions[] =
116 #  if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
117     // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
118     // definition.
119     "leak:*pthread_exit*\n"
120 #  endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
121 #  if SANITIZER_APPLE
122     // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
123     "leak:*_os_trace*\n"
124 #  endif
125     // TLS leak in some glibc versions, described in
126     // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
127     "leak:*tls_get_addr*\n"
128     "leak:*dlerror*\n";
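// Editor's note: user suppressions use the same one-pattern-per-line format as
// kStdSuppressions above and are loaded through the `suppressions` flag, e.g.:
//   # lsan.supp
//   leak:third_party_init
// run as: LSAN_OPTIONS=suppressions=lsan.supp:print_suppressions=1 ./a.out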
129 
130 void InitializeSuppressions() {
131   CHECK_EQ(nullptr, suppression_ctx);
132   suppression_ctx = new (suppression_placeholder)
133       LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
134 }
135 
136 void LeakSuppressionContext::LazyInit() {
137   if (!parsed) {
138     parsed = true;
139     context.ParseFromFile(flags()->suppressions);
140     if (&__lsan_default_suppressions)
141       context.Parse(__lsan_default_suppressions());
142     context.Parse(kStdSuppressions);
143     if (flags()->use_tls && flags()->use_ld_allocations)
144       suppress_module = GetLinker();
145   }
146 }
147 
148 Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
149   Suppression *s = nullptr;
150 
151   // Suppress by module name.
152   const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr);
153   if (!module_name)
154     module_name = "<unknown module>";
155   if (context.Match(module_name, kSuppressionLeak, &s))
156     return s;
157 
158   // Suppress by file or function name.
159   SymbolizedStackHolder symbolized_stack(
160       Symbolizer::GetOrInit()->SymbolizePC(addr));
161   const SymbolizedStack *frames = symbolized_stack.get();
162   for (const SymbolizedStack *cur = frames; cur; cur = cur->next) {
163     if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
164         context.Match(cur->info.file, kSuppressionLeak, &s)) {
165       break;
166     }
167   }
168   return s;
169 }
170 
171 static uptr GetCallerPC(const StackTrace &stack) {
172   // The top frame is our malloc/calloc/etc. The next frame is the caller.
173   if (stack.size >= 2)
174     return stack.trace[1];
175   return 0;
176 }
177 
178 #  if SANITIZER_APPLE
179 // Several pointers in the Objective-C runtime (method cache and class_rw_t,
180 // for example) are tagged with additional bits we need to strip.
181 static inline void *TransformPointer(void *p) {
182   uptr ptr = reinterpret_cast<uptr>(p);
183   return reinterpret_cast<void *>(ptr & OBJC_DATA_MASK);
184 }
185 #  endif
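// Editor's illustration (values are made up): OBJC_DATA_MASK keeps only the
// address bits, clearing both the high tag bits and the low flag bits packed
// around such pointers, e.g. on macOS:
//   0x8000000102f3a4e9 & 0x00007ffffffffff8 == 0x0000000102f3a4e8
// so the stripped value can be matched against allocator chunks below.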
186 
187 // On Linux, treats all chunks allocated from ld-linux.so as reachable, which
188 // covers dynamically allocated TLS blocks, internal dynamic loader's loaded
189 // modules accounting etc.
190 // Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
191 // They are allocated with a __libc_memalign() call in allocate_and_init()
192 // (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
193 // blocks, but we can make sure they come from our own allocator by intercepting
194 // __libc_memalign(). On top of that, there is no easy way to reach them. Their
195 // addresses are stored in a dynamically allocated array (the DTV) which is
196 // referenced from the static TLS. Unfortunately, we can't just rely on the DTV
197 // being reachable from the static TLS, and the dynamic TLS being reachable from
198 // the DTV. This is because the initial DTV is allocated before our interception
199 // mechanism kicks in, and thus we don't recognize it as allocated memory. We
200 // can't special-case it either, since we don't know its size.
201 // Our solution is to include in the root set all allocations made from
202 // ld-linux.so (which is where allocate_and_init() is implemented). This is
203 // guaranteed to include all dynamic TLS blocks (and possibly other allocations
204 // which we don't care about).
205 // On all other platforms, this simply checks to ensure that the caller pc is
206 // valid before reporting chunks as leaked.
207 bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
208   uptr caller_pc = GetCallerPC(stack);
209   // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
210   // it as reachable, as we can't properly report its allocation stack anyway.
211   return !caller_pc ||
212          (suppress_module && suppress_module->containsAddress(caller_pc));
213 }
214 
215 bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
216                                             uptr hit_count, uptr total_size) {
217   for (uptr i = 0; i < stack.size; i++) {
218     Suppression *s = GetSuppressionForAddr(
219         StackTrace::GetPreviousInstructionPc(stack.trace[i]));
220     if (s) {
221       s->weight += total_size;
222       atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
223       return true;
224     }
225   }
226   return false;
227 }
228 
229 bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
230                                       uptr total_size) {
231   LazyInit();
232   StackTrace stack = StackDepotGet(stack_trace_id);
233   if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
234     return false;
235   suppressed_stacks_sorted = false;
236   suppressed_stacks.push_back(stack_trace_id);
237   return true;
238 }
239 
240 static LeakSuppressionContext *GetSuppressionContext() {
241   CHECK(suppression_ctx);
242   return suppression_ctx;
243 }
244 
245 void InitCommonLsan() {
246   if (common_flags()->detect_leaks) {
247     // Initialization which can fail or print warnings should only be done if
248     // LSan is actually enabled.
249     InitializeSuppressions();
250     InitializePlatformSpecificModules();
251   }
252 }
253 
254 class Decorator : public __sanitizer::SanitizerCommonDecorator {
255  public:
256   Decorator() : SanitizerCommonDecorator() {}
257   const char *Error() { return Red(); }
258   const char *Leak() { return Blue(); }
259 };
260 
261 static inline bool MaybeUserPointer(uptr p) {
262   // Since our heap is located in mmap-ed memory, we can assume a sensible lower
263   // bound on heap addresses.
264   const uptr kMinAddress = 4 * 4096;
265   if (p < kMinAddress)
266     return false;
267 #  if defined(__x86_64__)
268   // TODO: support LAM48 and 5 level page tables.
269   // LAM_U57 mask format
270   //  * top byte: 0x81 because the format is: [0] [6-bit tag] [0]
271   //  * top-1 byte: 0xff because it should be 0
272   //  * top-2 byte: 0x80 because Linux uses 128 TB VMA ending at 0x7fffffffffff
273   constexpr uptr kLAM_U57Mask = 0x81ff80;
274   constexpr uptr kPointerMask = kLAM_U57Mask << 40;
275   return ((p & kPointerMask) == 0);
276 #  elif defined(__mips64)
277   return ((p >> 40) == 0);
278 #  elif defined(__aarch64__)
279   // TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in
280   // address translation and can be used to store a tag.
281   constexpr uptr kPointerMask = 255ULL << 48;
282   // Accept up to 48 bit VMA.
283   return ((p & kPointerMask) == 0);
284 #  elif defined(__loongarch_lp64)
285   // Allow a 47-bit user-space VMA for now.
286   return ((p >> 47) == 0);
287 #  else
288   return true;
289 #  endif
290 }
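// Editor's note on the x86-64 case above: kLAM_U57Mask << 40 ==
// 0x81ff800000000000, so a candidate pointer is accepted only if bit 63,
// bit 56, bits 55-48 and bit 47 are all zero; bits 62-57 (the LAM_U57 tag) and
// the low 47 address bits may hold anything. For example, 0x00007f1234567890
// passes, while a kernel address like 0xffff800000000000 is rejected.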
291 
292 namespace {
293 struct DirectMemoryAccessor {
294   void Init(uptr begin, uptr end) {};
295   void *LoadPtr(uptr p) const { return *reinterpret_cast<void **>(p); }
296 };
297 
298 struct CopyMemoryAccessor {
299   void Init(uptr begin, uptr end) {
300     this->begin = begin;
301     buffer.clear();
302     buffer.resize(end - begin);
303     MemCpyAccessible(buffer.data(), reinterpret_cast<void *>(begin),
304                      buffer.size());
305   };
306 
307   void *LoadPtr(uptr p) const {
308     uptr offset = p - begin;
309     CHECK_LE(offset + sizeof(void *), reinterpret_cast<uptr>(buffer.size()));
310     return *reinterpret_cast<void **>(offset +
311                                       reinterpret_cast<uptr>(buffer.data()));
312   }
313 
314  private:
315   uptr begin;
316   InternalMmapVector<char> buffer;
317 };
318 }  // namespace
319 
320 // Scans the memory range, looking for byte patterns that point into allocator
321 // chunks. Marks those chunks with |tag| and adds them to |frontier|.
322 // There are two usage modes for this function: finding reachable chunks
323 // (|tag| = kReachable) and finding indirectly leaked chunks
324 // (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
325 // so |frontier| = 0.
326 template <class Accessor>
327 void ScanForPointers(uptr begin, uptr end, Frontier *frontier,
328                      const char *region_type, ChunkTag tag,
329                      Accessor &accessor) {
330   CHECK(tag == kReachable || tag == kIndirectlyLeaked);
331   const uptr alignment = flags()->pointer_alignment();
332   LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
333                (void *)end);
334   accessor.Init(begin, end);
335   uptr pp = begin;
336   if (pp % alignment)
337     pp = pp + alignment - pp % alignment;
338   for (; pp + sizeof(void *) <= end; pp += alignment) {
339     void *p = accessor.LoadPtr(pp);
340 #  if SANITIZER_APPLE
341     p = TransformPointer(p);
342 #  endif
343     if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
344       continue;
345     uptr chunk = PointsIntoChunk(p);
346     if (!chunk)
347       continue;
348     // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
349     if (chunk == begin)
350       continue;
351     LsanMetadata m(chunk);
352     if (m.tag() == kReachable || m.tag() == kIgnored)
353       continue;
354 
355     // Do this check relatively late so we can log only the interesting cases.
356     if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
357       LOG_POINTERS(
358           "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
359           "%zu.\n",
360           (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
361           m.requested_size());
362       continue;
363     }
364 
365     m.set_tag(tag);
366     LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
367                  (void *)pp, p, (void *)chunk,
368                  (void *)(chunk + m.requested_size()), m.requested_size());
369     if (frontier)
370       frontier->push_back(chunk);
371   }
372 }
373 
374 void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
375                           const char *region_type, ChunkTag tag) {
376   DirectMemoryAccessor accessor;
377   ScanForPointers(begin, end, frontier, region_type, tag, accessor);
378 }
379 
380 // Scans a global range for pointers
381 void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
382   uptr allocator_begin = 0, allocator_end = 0;
383   GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
384   if (begin <= allocator_begin && allocator_begin < end) {
385     CHECK_LE(allocator_begin, allocator_end);
386     CHECK_LE(allocator_end, end);
387     if (begin < allocator_begin)
388       ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
389                            kReachable);
390     if (allocator_end < end)
391       ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
392   } else {
393     ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
394   }
395 }
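// Editor's illustration of the reachability model the scanners above implement
// (hypothetical user code, not part of this file):
//   static void *g_buf;   // lives in a GLOBAL region
//   g_buf = malloc(64);   // chunk found by ScanForPointers -> kReachable
//   g_buf = nullptr;      // no references remain, so the next leak check
//                         // reports a 64-byte direct leak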
396 
397 template <class Accessor>
398 void ScanRanges(const InternalMmapVector<Range> &ranges, Frontier *frontier,
399                 const char *region_type, Accessor &accessor) {
400   for (uptr i = 0; i < ranges.size(); i++) {
401     ScanForPointers(ranges[i].begin, ranges[i].end, frontier, region_type,
402                     kReachable, accessor);
403   }
404 }
405 
406 void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
407                           Frontier *frontier) {
408   DirectMemoryAccessor accessor;
409   ScanRanges(ranges, frontier, "FAKE STACK", accessor);
410 }
411 
412 #  if SANITIZER_FUCHSIA
413 
414 // Fuchsia handles all threads together with its own callback.
415 static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t,
416                            uptr) {}
417 
418 #  else
419 
420 #    if SANITIZER_ANDROID
421 // FIXME: Move this out into *libcdep.cpp
422 extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
423     pid_t, void (*cb)(void *, void *, uptr, void *), void *);
424 #    endif
425 
426 static void ProcessThreadRegistry(Frontier *frontier) {
427   InternalMmapVector<uptr> ptrs;
428   GetAdditionalThreadContextPtrsLocked(&ptrs);
429 
430   for (uptr i = 0; i < ptrs.size(); ++i) {
431     void *ptr = reinterpret_cast<void *>(ptrs[i]);
432     uptr chunk = PointsIntoChunk(ptr);
433     if (!chunk)
434       continue;
435     LsanMetadata m(chunk);
436     if (!m.allocated())
437       continue;
438 
439     // Mark as reachable and add to frontier.
440     LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
441     m.set_tag(kReachable);
442     frontier->push_back(chunk);
443   }
444 }
445 
446 // Scans thread data (stacks and TLS) for heap pointers.
447 template <class Accessor>
448 static void ProcessThread(tid_t os_id, uptr sp,
449                           const InternalMmapVector<uptr> &registers,
450                           InternalMmapVector<Range> &extra_ranges,
451                           Frontier *frontier, Accessor &accessor) {
452   // `extra_ranges` is created outside of this function and the loop below so
453   // that its mapped memory can be reused.
454   CHECK(extra_ranges.empty());
455   LOG_THREADS("Processing thread %llu.\n", os_id);
456   uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
457   DTLS *dtls;
458   bool thread_found =
459       GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
460                             &tls_end, &cache_begin, &cache_end, &dtls);
461   if (!thread_found) {
462     // If a thread can't be found in the thread registry, it's probably in the
463     // process of destruction. Log this event and move on.
464     LOG_THREADS("Thread %llu not found in registry.\n", os_id);
465     return;
466   }
467 
468   if (!sp)
469     sp = stack_begin;
470 
471   if (flags()->use_registers) {
472     uptr registers_begin = reinterpret_cast<uptr>(registers.data());
473     uptr registers_end =
474         reinterpret_cast<uptr>(registers.data() + registers.size());
475     ScanForPointers(registers_begin, registers_end, frontier, "REGISTERS",
476                     kReachable, accessor);
477   }
478 
479   if (flags()->use_stacks) {
480     LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
481                 (void *)stack_end, (void *)sp);
482     if (sp < stack_begin || sp >= stack_end) {
483       // SP is outside the recorded stack range (e.g. the thread is running a
484       // signal handler on alternate stack, or swapcontext was used).
485       // Again, consider the entire stack range to be reachable.
486       LOG_THREADS("WARNING: stack pointer not in stack range.\n");
487       uptr page_size = GetPageSizeCached();
488       int skipped = 0;
489       while (stack_begin < stack_end &&
490              !IsAccessibleMemoryRange(stack_begin, 1)) {
491         skipped++;
492         stack_begin += page_size;
493       }
494       LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n", skipped,
495                   (void *)stack_begin, (void *)stack_end);
496     } else {
497       // Shrink the stack range to ignore out-of-scope values.
498       stack_begin = sp;
499     }
500     ScanForPointers(stack_begin, stack_end, frontier, "STACK", kReachable,
501                     accessor);
502     GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
503     ScanRanges(extra_ranges, frontier, "FAKE STACK", accessor);
504   }
505 
506   if (flags()->use_tls) {
507     if (tls_begin) {
508       LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
509       // If the tls and cache ranges don't overlap, scan the full tls range;
510       // otherwise, only scan the non-overlapping portions.
511       if (cache_begin == cache_end || tls_end < cache_begin ||
512           tls_begin > cache_end) {
513         ScanForPointers(tls_begin, tls_end, frontier, "TLS", kReachable,
514                         accessor);
515       } else {
516         if (tls_begin < cache_begin)
517           ScanForPointers(tls_begin, cache_begin, frontier, "TLS", kReachable,
518                           accessor);
519         if (tls_end > cache_end)
520           ScanForPointers(cache_end, tls_end, frontier, "TLS", kReachable,
521                           accessor);
522       }
523     }
524 #    if SANITIZER_ANDROID
525     extra_ranges.clear();
526     auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/,
527                    void *arg) -> void {
528       reinterpret_cast<InternalMmapVector<Range> *>(arg)->push_back(
529           {reinterpret_cast<uptr>(dtls_begin),
530            reinterpret_cast<uptr>(dtls_end)});
531     };
532     ScanRanges(extra_ranges, frontier, "DTLS", accessor);
533     // FIXME: There might be a race-condition here (and in Bionic) if the
534     // thread is suspended in the middle of updating its DTLS. IOWs, we
535     // could scan already freed memory. (probably fine for now)
536     __libc_iterate_dynamic_tls(os_id, cb, frontier);
537 #    else
538     if (dtls && !DTLSInDestruction(dtls)) {
539       ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
540         uptr dtls_beg = dtv.beg;
541         uptr dtls_end = dtls_beg + dtv.size;
542         if (dtls_beg < dtls_end) {
543           LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
544                       (void *)dtls_end);
545           ScanForPointers(dtls_beg, dtls_end, frontier, "DTLS", kReachable,
546                           accessor);
547         }
548       });
549     } else {
550       // We are handling a thread with DTLS under destruction. Log about
551       // this and continue.
552       LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
553     }
554 #    endif
555   }
556 }
557 
558 static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
559                            Frontier *frontier, tid_t caller_tid,
560                            uptr caller_sp) {
561   InternalMmapVector<tid_t> done_threads;
562   InternalMmapVector<uptr> registers;
563   InternalMmapVector<Range> extra_ranges;
564   for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
565     registers.clear();
566     extra_ranges.clear();
567 
568     const tid_t os_id = suspended_threads.GetThreadID(i);
569     uptr sp = 0;
570     PtraceRegistersStatus have_registers =
571         suspended_threads.GetRegistersAndSP(i, &registers, &sp);
572     if (have_registers != REGISTERS_AVAILABLE) {
573       VReport(1, "Unable to get registers from thread %llu.\n", os_id);
574       // If unable to get SP, consider the entire stack to be reachable unless
575       // GetRegistersAndSP failed with ESRCH.
576       if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
577         continue;
578       sp = 0;
579     }
580 
581     if (os_id == caller_tid)
582       sp = caller_sp;
583 
584     DirectMemoryAccessor accessor;
585     ProcessThread(os_id, sp, registers, extra_ranges, frontier, accessor);
586     if (flags()->use_detached)
587       done_threads.push_back(os_id);
588   }
589 
590   if (flags()->use_detached) {
591     CopyMemoryAccessor accessor;
592     InternalMmapVector<tid_t> known_threads;
593     GetRunningThreadsLocked(&known_threads);
594     Sort(done_threads.data(), done_threads.size());
595     for (tid_t os_id : known_threads) {
596       registers.clear();
597       extra_ranges.clear();
598 
599       uptr i = InternalLowerBound(done_threads, os_id);
600       if (i >= done_threads.size() || done_threads[i] != os_id) {
601         uptr sp = (os_id == caller_tid) ? caller_sp : 0;
602         ProcessThread(os_id, sp, registers, extra_ranges, frontier, accessor);
603       }
604     }
605   }
606 
607   // Add pointers reachable from ThreadContexts
608   ProcessThreadRegistry(frontier);
609 }
610 
611 #  endif  // SANITIZER_FUCHSIA
612 
613 // A map that contains [region_begin, region_end) pairs.
614 using RootRegions = DenseMap<detail::DenseMapPair<uptr, uptr>, uptr>;
615 
616 static RootRegions &GetRootRegionsLocked() {
617   global_mutex.CheckLocked();
618   static RootRegions *regions = nullptr;
619   alignas(RootRegions) static char placeholder[sizeof(RootRegions)];
620   if (!regions)
621     regions = new (placeholder) RootRegions();
622   return *regions;
623 }
624 
625 bool HasRootRegions() { return !GetRootRegionsLocked().empty(); }
626 
627 void ScanRootRegions(Frontier *frontier,
628                      const InternalMmapVectorNoCtor<Region> &mapped_regions) {
629   if (!flags()->use_root_regions)
630     return;
631 
632   InternalMmapVector<Region> regions;
633   GetRootRegionsLocked().forEach([&](const auto &kv) {
634     regions.push_back({kv.first.first, kv.first.second});
635     return true;
636   });
637 
638   InternalMmapVector<Region> intersection;
639   Intersect(mapped_regions, regions, intersection);
640 
641   for (const Region &r : intersection) {
642     LOG_POINTERS("Root region intersects with mapped region at %p-%p\n",
643                  (void *)r.begin, (void *)r.end);
644     ScanRangeForPointers(r.begin, r.end, frontier, "ROOT", kReachable);
645   }
646 }
647 
648 // Scans root regions for heap pointers.
649 static void ProcessRootRegions(Frontier *frontier) {
650   if (!flags()->use_root_regions || !HasRootRegions())
651     return;
652   MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
653   MemoryMappedSegment segment;
654   InternalMmapVector<Region> mapped_regions;
655   while (proc_maps.Next(&segment))
656     if (segment.IsReadable())
657       mapped_regions.push_back({segment.start, segment.end});
658   ScanRootRegions(frontier, mapped_regions);
659 }
660 
661 static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
662   while (frontier->size()) {
663     uptr next_chunk = frontier->back();
664     frontier->pop_back();
665     LsanMetadata m(next_chunk);
666     ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
667                          "HEAP", tag);
668   }
669 }
670 
671 // ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
672 // which are reachable from it as indirectly leaked.
673 static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
674   chunk = GetUserBegin(chunk);
675   LsanMetadata m(chunk);
676   if (m.allocated() && m.tag() != kReachable) {
677     ScanRangeForPointers(chunk, chunk + m.requested_size(),
678                          /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
679   }
680 }
681 
682 static void IgnoredSuppressedCb(uptr chunk, void *arg) {
683   CHECK(arg);
684   chunk = GetUserBegin(chunk);
685   LsanMetadata m(chunk);
686   if (!m.allocated() || m.tag() == kIgnored)
687     return;
688 
689   const InternalMmapVector<u32> &suppressed =
690       *static_cast<const InternalMmapVector<u32> *>(arg);
691   uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
692   if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
693     return;
694 
695   LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
696                (void *)(chunk + m.requested_size()), m.requested_size());
697   m.set_tag(kIgnored);
698 }
699 
700 // ForEachChunk callback. If chunk is marked as ignored, adds its address to
701 // frontier.
702 static void CollectIgnoredCb(uptr chunk, void *arg) {
703   CHECK(arg);
704   chunk = GetUserBegin(chunk);
705   LsanMetadata m(chunk);
706   if (m.allocated() && m.tag() == kIgnored) {
707     LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
708                  (void *)(chunk + m.requested_size()), m.requested_size());
709     reinterpret_cast<Frontier *>(arg)->push_back(chunk);
710   }
711 }
712 
713 // Sets the appropriate tag on each chunk.
714 static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
715                               Frontier *frontier, tid_t caller_tid,
716                               uptr caller_sp) {
717   const InternalMmapVector<u32> &suppressed_stacks =
718       GetSuppressionContext()->GetSortedSuppressedStacks();
719   if (!suppressed_stacks.empty()) {
720     ForEachChunk(IgnoredSuppressedCb,
721                  const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
722   }
723   ForEachChunk(CollectIgnoredCb, frontier);
724   ProcessGlobalRegions(frontier);
725   ProcessThreads(suspended_threads, frontier, caller_tid, caller_sp);
726   ProcessRootRegions(frontier);
727   FloodFillTag(frontier, kReachable);
728 
729   // The check here is relatively expensive, so we do this in a separate flood
730   // fill. That way we can skip the check for chunks that are reachable
731   // otherwise.
732   LOG_POINTERS("Processing platform-specific allocations.\n");
733   ProcessPlatformSpecificAllocations(frontier);
734   FloodFillTag(frontier, kReachable);
735 
736   // Iterate over leaked chunks and mark those that are reachable from other
737   // leaked chunks.
738   LOG_POINTERS("Scanning leaked chunks.\n");
739   ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
740 }
741 
742 // ForEachChunk callback. Resets the tags to pre-leak-check state.
743 static void ResetTagsCb(uptr chunk, void *arg) {
744   (void)arg;
745   chunk = GetUserBegin(chunk);
746   LsanMetadata m(chunk);
747   if (m.allocated() && m.tag() != kIgnored)
748     m.set_tag(kDirectlyLeaked);
749 }
750 
751 // ForEachChunk callback. Aggregates information about unreachable chunks into
752 // a LeakReport.
753 static void CollectLeaksCb(uptr chunk, void *arg) {
754   CHECK(arg);
755   LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
756   chunk = GetUserBegin(chunk);
757   LsanMetadata m(chunk);
758   if (!m.allocated())
759     return;
760   if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
761     leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
762 }
763 
764 void LeakSuppressionContext::PrintMatchedSuppressions() {
765   InternalMmapVector<Suppression *> matched;
766   context.GetMatched(&matched);
767   if (!matched.size())
768     return;
769   const char *line = "-----------------------------------------------------";
770   Printf("%s\n", line);
771   Printf("Suppressions used:\n");
772   Printf("  count      bytes template\n");
773   for (uptr i = 0; i < matched.size(); i++) {
774     Printf("%7zu %10zu %s\n",
775            static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
776            matched[i]->weight, matched[i]->templ);
777   }
778   Printf("%s\n\n", line);
779 }
780 
781 #  if SANITIZER_FUCHSIA
782 
783 // Fuchsia provides a libc interface that guarantees all threads are
784 // covered, and SuspendedThreadsList is never really used.
785 static bool ReportUnsuspendedThreads(const SuspendedThreadsList &) {
786   return true;
787 }
788 
789 #  else  // !SANITIZER_FUCHSIA
790 
791 static bool ReportUnsuspendedThreads(
792     const SuspendedThreadsList &suspended_threads) {
793   InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
794   for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
795     threads[i] = suspended_threads.GetThreadID(i);
796 
797   Sort(threads.data(), threads.size());
798 
799   InternalMmapVector<tid_t> known_threads;
800   GetRunningThreadsLocked(&known_threads);
801 
802   bool succeeded = true;
803   for (auto os_id : known_threads) {
804     uptr i = InternalLowerBound(threads, os_id);
805     if (i >= threads.size() || threads[i] != os_id) {
806       succeeded = false;
807       Report(
808           "Running thread %zu was not suspended. False leaks are possible.\n",
809           os_id);
810     }
811   }
812   return succeeded;
813 }
814 
815 #  endif  // !SANITIZER_FUCHSIA
816 
817 static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
818                                   void *arg) {
819   CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
820   CHECK(param);
821   CHECK(!param->success);
822   if (!ReportUnsuspendedThreads(suspended_threads)) {
823     switch (flags()->thread_suspend_fail) {
824       case 0:
825         param->success = true;
826         return;
827       case 1:
828         break;
829       case 2:
830         // Will crash on return.
831         return;
832     }
833   }
834   ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid,
835                     param->caller_sp);
836   ForEachChunk(CollectLeaksCb, &param->leaks);
837   // Clean up for subsequent leak checks. This assumes we did not overwrite any
838   // kIgnored tags.
839   ForEachChunk(ResetTagsCb, nullptr);
840   param->success = true;
841 }
842 
843 static bool PrintResults(LeakReport &report) {
844   uptr unsuppressed_count = report.UnsuppressedLeakCount();
845   if (unsuppressed_count) {
846     Decorator d;
847     Printf(
848         "\n"
849         "================================================================="
850         "\n");
851     Printf("%s", d.Error());
852     Report("ERROR: LeakSanitizer: detected memory leaks\n");
853     Printf("%s", d.Default());
854     report.ReportTopLeaks(flags()->max_leaks);
855   }
856   if (common_flags()->print_suppressions)
857     GetSuppressionContext()->PrintMatchedSuppressions();
858   if (unsuppressed_count)
859     report.PrintSummary();
860   if ((unsuppressed_count && common_flags()->verbosity >= 2) ||
861       flags()->log_threads)
862     PrintThreads();
863   return unsuppressed_count;
864 }
865 
866 static bool CheckForLeaksOnce() {
867   if (&__lsan_is_turned_off && __lsan_is_turned_off()) {
868     VReport(1, "LeakSanitizer is disabled\n");
869     return false;
870   }
871   VReport(1, "LeakSanitizer: checking for leaks\n");
872   // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
873   // suppressions. However if a stack id was previously suppressed, it should be
874   // suppressed in future checks as well.
875   for (int i = 0;; ++i) {
876     EnsureMainThreadIDIsCorrect();
877     CheckForLeaksParam param;
878     // Capture calling thread's stack pointer early, to avoid false negatives.
879     // Old frame with dead pointers might be overlapped by new frame inside
880     // CheckForLeaks which does not use bytes with pointers before the
881     // threads are suspended and stack pointers captured.
882     param.caller_tid = GetTid();
883     param.caller_sp = reinterpret_cast<uptr>(__builtin_frame_address(0));
884     LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
885     if (!param.success) {
886       Report("LeakSanitizer has encountered a fatal error.\n");
887       Report(
888           "HINT: For debugging, try setting environment variable "
889           "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
890       Report(
891           "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
892           "etc)\n");
893       Die();
894     }
895     LeakReport leak_report;
896     leak_report.AddLeakedChunks(param.leaks);
897 
898     // No new suppressions stacks, so rerun will not help and we can report.
899     if (!leak_report.ApplySuppressions())
900       return PrintResults(leak_report);
901 
902     // No indirect leaks to report, so we are done here.
903     if (!leak_report.IndirectUnsuppressedLeakCount())
904       return PrintResults(leak_report);
905 
906     if (i >= 8) {
907       Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
908       return PrintResults(leak_report);
909     }
910 
911     // We found a new previously unseen suppressed call stack. Rerun to make
912     // sure it does not hold indirect leaks.
913     VReport(1, "Rerun with %zu suppressed stacks.",
914             GetSuppressionContext()->GetSortedSuppressedStacks().size());
915   }
916 }
917 
918 static bool CheckForLeaks() {
919   int leaking_tries = 0;
920   for (int i = 0; i < flags()->tries; ++i) leaking_tries += CheckForLeaksOnce();
921   return leaking_tries == flags()->tries;
922 }
923 
924 static bool has_reported_leaks = false;
925 bool HasReportedLeaks() { return has_reported_leaks; }
926 
927 void DoLeakCheck() {
928   Lock l(&global_mutex);
929   static bool already_done;
930   if (already_done)
931     return;
932   already_done = true;
933   has_reported_leaks = CheckForLeaks();
934   if (has_reported_leaks)
935     HandleLeaks();
936 }
937 
938 static int DoRecoverableLeakCheck() {
939   Lock l(&global_mutex);
940   bool have_leaks = CheckForLeaks();
941   return have_leaks ? 1 : 0;
942 }
943 
944 void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
945 
946 ///// LeakReport implementation. /////
947 
948 // A hard limit on the number of distinct leaks, to avoid quadratic complexity
949 // in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
950 // in real-world applications.
951 // FIXME: Get rid of this limit by moving logic into DedupLeaks.
952 const uptr kMaxLeaksConsidered = 5000;
953 
954 void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
955   for (const LeakedChunk &leak : chunks) {
956     uptr chunk = leak.chunk;
957     u32 stack_trace_id = leak.stack_trace_id;
958     uptr leaked_size = leak.leaked_size;
959     ChunkTag tag = leak.tag;
960     CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
961 
962     if (u32 resolution = flags()->resolution) {
963       StackTrace stack = StackDepotGet(stack_trace_id);
964       stack.size = Min(stack.size, resolution);
965       stack_trace_id = StackDepotPut(stack);
966     }
967 
968     bool is_directly_leaked = (tag == kDirectlyLeaked);
969     uptr i;
970     for (i = 0; i < leaks_.size(); i++) {
971       if (leaks_[i].stack_trace_id == stack_trace_id &&
972           leaks_[i].is_directly_leaked == is_directly_leaked) {
973         leaks_[i].hit_count++;
974         leaks_[i].total_size += leaked_size;
975         break;
976       }
977     }
978     if (i == leaks_.size()) {
979       if (leaks_.size() == kMaxLeaksConsidered)
980         return;
981       Leak leak = {next_id_++,         /* hit_count */ 1,
982                    leaked_size,        stack_trace_id,
983                    is_directly_leaked, /* is_suppressed */ false};
984       leaks_.push_back(leak);
985     }
986     if (flags()->report_objects) {
987       LeakedObject obj = {leaks_[i].id, GetUserAddr(chunk), leaked_size};
988       leaked_objects_.push_back(obj);
989     }
990   }
991 }
992 
993 static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
994   if (leak1.is_directly_leaked == leak2.is_directly_leaked)
995     return leak1.total_size > leak2.total_size;
996   else
997     return leak1.is_directly_leaked;
998 }
999 
1000 void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
1001   CHECK(leaks_.size() <= kMaxLeaksConsidered);
1002   Printf("\n");
1003   if (leaks_.size() == kMaxLeaksConsidered)
1004     Printf(
1005         "Too many leaks! Only the first %zu leaks encountered will be "
1006         "reported.\n",
1007         kMaxLeaksConsidered);
1008 
1009   uptr unsuppressed_count = UnsuppressedLeakCount();
1010   if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
1011     Printf("The %zu top leak(s):\n", num_leaks_to_report);
1012   Sort(leaks_.data(), leaks_.size(), &LeakComparator);
1013   uptr leaks_reported = 0;
1014   for (uptr i = 0; i < leaks_.size(); i++) {
1015     if (leaks_[i].is_suppressed)
1016       continue;
1017     PrintReportForLeak(i);
1018     leaks_reported++;
1019     if (leaks_reported == num_leaks_to_report)
1020       break;
1021   }
1022   if (leaks_reported < unsuppressed_count) {
1023     uptr remaining = unsuppressed_count - leaks_reported;
1024     Printf("Omitting %zu more leak(s).\n", remaining);
1025   }
1026 }
1027 
1028 void LeakReport::PrintReportForLeak(uptr index) {
1029   Decorator d;
1030   Printf("%s", d.Leak());
1031   Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
1032          leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
1033          leaks_[index].total_size, leaks_[index].hit_count);
1034   Printf("%s", d.Default());
1035 
1036   CHECK(leaks_[index].stack_trace_id);
1037   StackDepotGet(leaks_[index].stack_trace_id).Print();
1038 
1039   if (flags()->report_objects) {
1040     Printf("Objects leaked above:\n");
1041     PrintLeakedObjectsForLeak(index);
1042     Printf("\n");
1043   }
1044 }
1045 
1046 void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
1047   u32 leak_id = leaks_[index].id;
1048   for (uptr j = 0; j < leaked_objects_.size(); j++) {
1049     if (leaked_objects_[j].leak_id == leak_id)
1050       Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
1051              leaked_objects_[j].size);
1052   }
1053 }
1054 
1055 void LeakReport::PrintSummary() {
1056   CHECK(leaks_.size() <= kMaxLeaksConsidered);
1057   uptr bytes = 0, allocations = 0;
1058   for (uptr i = 0; i < leaks_.size(); i++) {
1059     if (leaks_[i].is_suppressed)
1060       continue;
1061     bytes += leaks_[i].total_size;
1062     allocations += leaks_[i].hit_count;
1063   }
1064   InternalScopedString summary;
1065   summary.AppendF("%zu byte(s) leaked in %zu allocation(s).", bytes,
1066                   allocations);
1067   ReportErrorSummary(summary.data());
1068 }
1069 
1070 uptr LeakReport::ApplySuppressions() {
1071   LeakSuppressionContext *suppressions = GetSuppressionContext();
1072   uptr new_suppressions = 0;
1073   for (uptr i = 0; i < leaks_.size(); i++) {
1074     if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
1075                                leaks_[i].total_size)) {
1076       leaks_[i].is_suppressed = true;
1077       ++new_suppressions;
1078     }
1079   }
1080   return new_suppressions;
1081 }
1082 
1083 uptr LeakReport::UnsuppressedLeakCount() {
1084   uptr result = 0;
1085   for (uptr i = 0; i < leaks_.size(); i++)
1086     if (!leaks_[i].is_suppressed)
1087       result++;
1088   return result;
1089 }
1090 
1091 uptr LeakReport::IndirectUnsuppressedLeakCount() {
1092   uptr result = 0;
1093   for (uptr i = 0; i < leaks_.size(); i++)
1094     if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
1095       result++;
1096   return result;
1097 }
1098 
1099 }  // namespace __lsan
1100 #else   // CAN_SANITIZE_LEAKS
1101 namespace __lsan {
1102 void InitCommonLsan() {}
1103 void DoLeakCheck() {}
1104 void DoRecoverableLeakCheckVoid() {}
1105 void DisableInThisThread() {}
1106 void EnableInThisThread() {}
1107 }  // namespace __lsan
1108 #endif  // CAN_SANITIZE_LEAKS
1109 
1110 using namespace __lsan;
1111 
1112 extern "C" {
1113 SANITIZER_INTERFACE_ATTRIBUTE
1114 void __lsan_ignore_object(const void *p) {
1115 #if CAN_SANITIZE_LEAKS
1116   if (!common_flags()->detect_leaks)
1117     return;
1118   // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
1119   // locked.
1120   Lock l(&global_mutex);
1121   IgnoreObjectResult res = IgnoreObject(p);
1122   if (res == kIgnoreObjectInvalid)
1123     VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
1124   if (res == kIgnoreObjectAlreadyIgnored)
1125     VReport(1,
1126             "__lsan_ignore_object(): "
1127             "heap object at %p is already being ignored\n",
1128             p);
1129   if (res == kIgnoreObjectSuccess)
1130     VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
1131 #endif  // CAN_SANITIZE_LEAKS
1132 }
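// Editor's sketch of typical usage (hypothetical caller, not part of this
// file):
//   #include <sanitizer/lsan_interface.h>
//   void *p = make_intentionally_leaked_singleton();
//   __lsan_ignore_object(p);  // p, and chunks reachable from it, are ignored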
1133 
1134 SANITIZER_INTERFACE_ATTRIBUTE
1135 void __lsan_register_root_region(const void *begin, uptr size) {
1136 #if CAN_SANITIZE_LEAKS
1137   VReport(1, "Registered root region at %p of size %zu\n", begin, size);
1138   uptr b = reinterpret_cast<uptr>(begin);
1139   uptr e = b + size;
1140   CHECK_LT(b, e);
1141 
1142   Lock l(&global_mutex);
1143   ++GetRootRegionsLocked()[{b, e}];
1144 #endif  // CAN_SANITIZE_LEAKS
1145 }
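// Editor's sketch of typical usage (hypothetical caller; kArenaSize is made
// up):
//   void *arena = mmap(nullptr, kArenaSize, PROT_READ | PROT_WRITE,
//                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
//   __lsan_register_root_region(arena, kArenaSize);
//   // ... heap pointers stored in the arena now keep their chunks reachable
//   __lsan_unregister_root_region(arena, kArenaSize);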
1146 
1147 SANITIZER_INTERFACE_ATTRIBUTE
1148 void __lsan_unregister_root_region(const void *begin, uptr size) {
1149 #if CAN_SANITIZE_LEAKS
1150   uptr b = reinterpret_cast<uptr>(begin);
1151   uptr e = b + size;
1152   CHECK_LT(b, e);
1153   VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
1154 
1155   {
1156     Lock l(&global_mutex);
1157     if (auto *f = GetRootRegionsLocked().find({b, e})) {
1158       if (--(f->second) == 0)
1159         GetRootRegionsLocked().erase(f);
1160       return;
1161     }
1162   }
1163   Report(
1164       "__lsan_unregister_root_region(): region at %p of size %zu has not "
1165       "been registered.\n",
1166       begin, size);
1167   Die();
1168 #endif  // CAN_SANITIZE_LEAKS
1169 }
1170 
1171 SANITIZER_INTERFACE_ATTRIBUTE
1172 void __lsan_disable() {
1173 #if CAN_SANITIZE_LEAKS
1174   __lsan::DisableInThisThread();
1175 #endif
1176 }
1177 
1178 SANITIZER_INTERFACE_ATTRIBUTE
1179 void __lsan_enable() {
1180 #if CAN_SANITIZE_LEAKS
1181   __lsan::EnableInThisThread();
1182 #endif
1183 }
1184 
1185 SANITIZER_INTERFACE_ATTRIBUTE
1186 void __lsan_do_leak_check() {
1187 #if CAN_SANITIZE_LEAKS
1188   if (common_flags()->detect_leaks)
1189     __lsan::DoLeakCheck();
1190 #endif  // CAN_SANITIZE_LEAKS
1191 }
1192 
1193 SANITIZER_INTERFACE_ATTRIBUTE
1194 int __lsan_do_recoverable_leak_check() {
1195 #if CAN_SANITIZE_LEAKS
1196   if (common_flags()->detect_leaks)
1197     return __lsan::DoRecoverableLeakCheck();
1198 #endif  // CAN_SANITIZE_LEAKS
1199   return 0;
1200 }
1201 
1202 SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
1203   return "";
1204 }
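// Editor's note: a program may provide a strong definition to override the
// weak default above and bake in its own flag string, e.g. (hypothetical):
//   extern "C" const char *__lsan_default_options() {
//     return "max_leaks=5:print_suppressions=0";
//   }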
1205 
1206 #if !SANITIZER_SUPPORTS_WEAK_HOOKS
1207 SANITIZER_INTERFACE_WEAK_DEF(int, __lsan_is_turned_off, void) {
1208   return 0;
1209 }
1210 
1211 SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_suppressions, void) {
1212   return "";
1213 }
1214 #endif
1215 }  // extern "C"
1216