//=-- lsan_common.h -------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Private LSan header.
//
//===----------------------------------------------------------------------===//

#ifndef LSAN_COMMON_H
#define LSAN_COMMON_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_range.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "sanitizer_common/sanitizer_thread_registry.h"

// LeakSanitizer relies on some Glibc internals (e.g. TLS machinery) on Linux.
// LSan also fares poorly on 32-bit architectures, because the "small" (4-byte)
// pointer size leads to a high false-negative ratio on large leaks. We still
// want to support it on some 32-bit arches (e.g. x86); see
// https://github.com/google/sanitizers/issues/403.
// To enable LeakSanitizer on a new architecture, one needs to implement the
// internal_clone function as well as (probably) adjust the TLS machinery for
// the new architecture inside the sanitizer library.
// Leak detection is excluded on arm32 for Android because `__aeabi_read_tp`
// is missing there, which caused a link error.
#if SANITIZER_ANDROID && (__ANDROID_API__ < 28 || defined(__arm__))
#  define CAN_SANITIZE_LEAKS 0
#elif (SANITIZER_LINUX || SANITIZER_APPLE) && (SANITIZER_WORDSIZE == 64) && \
    (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) ||  \
     defined(__powerpc64__) || defined(__s390x__))
#  define CAN_SANITIZE_LEAKS 1
#elif defined(__i386__) && (SANITIZER_LINUX || SANITIZER_APPLE)
#  define CAN_SANITIZE_LEAKS 1
#elif defined(__arm__) && SANITIZER_LINUX
#  define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_LOONGARCH64 && SANITIZER_LINUX
#  define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_RISCV64 && SANITIZER_LINUX
#  define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_NETBSD || SANITIZER_FUCHSIA
#  define CAN_SANITIZE_LEAKS 1
#else
#  define CAN_SANITIZE_LEAKS 0
#endif

namespace __sanitizer {
class FlagParser;
class ThreadRegistry;
class ThreadContextBase;
struct DTLS;
}

// This section defines function and class prototypes which must be implemented
// by the parent tool linking in LSan. There are implementations provided by the
// LSan library which will be linked in when LSan is used as a standalone tool.
namespace __lsan {

// Chunk tags.
enum ChunkTag {
  kDirectlyLeaked = 0,  // default
  kIndirectlyLeaked = 1,
  kReachable = 2,
  kIgnored = 3
};

enum IgnoreObjectResult {
  kIgnoreObjectSuccess,
  kIgnoreObjectAlreadyIgnored,
  kIgnoreObjectInvalid
};

//// --------------------------------------------------------------------------
//// Poisoning prototypes.
//// --------------------------------------------------------------------------

// Returns true if [addr, addr + sizeof(void *)) is poisoned.
bool WordIsPoisoned(uptr addr);

//// --------------------------------------------------------------------------
//// Thread prototypes.
//// --------------------------------------------------------------------------

// Wrappers for ThreadRegistry access.
void LockThreads() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
void UnlockThreads() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
// If called from the main thread, updates the main thread's TID in the thread
// registry. We need this to handle processes that fork() without a subsequent
// exec(), which invalidates the recorded TID. To update it, we must call
// gettid() from the main thread. Our solution is to call this function before
// leak checking and also before every call to pthread_create() (to handle cases
// where leak checking is initiated from a non-main thread).
void EnsureMainThreadIDIsCorrect();
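// A minimal usage sketch (hypothetical interceptor, shown only for
// illustration; the real call sites live in the LSan / parent-tool
// interceptors):
//
//   INTERCEPTOR(int, pthread_create, void *th, void *attr,
//               void *(*callback)(void *), void *param) {
//     EnsureMainThreadIDIsCorrect();  // refresh the recorded TID after fork()
//     // ... set up thread bookkeeping, then call REAL(pthread_create) ...
//   }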

bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls);
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches);
void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges);
void GetThreadExtraStackRangesLocked(tid_t os_id,
                                     InternalMmapVector<Range> *ranges);
void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs);
void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads);

//// --------------------------------------------------------------------------
//// Allocator prototypes.
//// --------------------------------------------------------------------------

// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
void UnlockAllocator();

// Lock/unlock the global mutex.
void LockGlobal();
void UnlockGlobal();

// Returns the address range occupied by the global allocator object.
void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// If p points into a chunk that has been allocated to the user, returns its
// user-visible address. Otherwise, returns 0.
uptr PointsIntoChunk(void *p);
// Returns address of user-visible chunk contained in this allocator chunk.
uptr GetUserBegin(uptr chunk);
// Returns the user-visible address for the chunk. If memory tagging is used,
// this function returns the tagged address.
uptr GetUserAddr(uptr chunk);

// Wrapper for chunk metadata operations.
class LsanMetadata {
 public:
  // Constructor accepts address of user-visible chunk.
  explicit LsanMetadata(uptr chunk);
  bool allocated() const;
  ChunkTag tag() const;
  void set_tag(ChunkTag value);
  uptr requested_size() const;
  u32 stack_trace_id() const;

 private:
  void *metadata_;
};

// Iterate over all existing chunks. Allocator must be locked.
void ForEachChunk(ForEachChunkCallback callback, void *arg);

// Helper for __lsan_ignore_object().
IgnoreObjectResult IgnoreObject(const void *p);

// The rest of the LSan interface, which is implemented by the library.

struct ScopedStopTheWorldLock {
  ScopedStopTheWorldLock() {
    LockThreads();
    LockAllocator();
  }

  ~ScopedStopTheWorldLock() {
    UnlockAllocator();
    UnlockThreads();
  }

  ScopedStopTheWorldLock &operator=(const ScopedStopTheWorldLock &) = delete;
  ScopedStopTheWorldLock(const ScopedStopTheWorldLock &) = delete;
};
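
// A minimal usage sketch (hypothetical caller, for illustration only; the
// real call sites live in the leak-checking implementation):
//
//   {
//     ScopedStopTheWorldLock lock;  // threads locked first, then the allocator
//     // ... suspend threads and scan roots while both locks are held ...
//   }  // allocator unlocked first, then threads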

struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
#undef LSAN_FLAG

  void SetDefaults();
  uptr pointer_alignment() const {
    return use_unaligned ? 1 : sizeof(uptr);
  }
};

extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
void RegisterLsanFlags(FlagParser *parser, Flags *f);
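
// Example (sketch, hypothetical scanner loop): conservative pointer scanning
// steps through memory by flags()->pointer_alignment(), i.e. one byte when the
// use_unaligned flag is set and sizeof(uptr) otherwise:
//
//   for (uptr p = begin; p + sizeof(uptr) <= end;
//        p += flags()->pointer_alignment()) {
//     uptr candidate = *reinterpret_cast<uptr *>(p);
//     // ... check whether `candidate` points into a live allocator chunk ...
//   }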

struct LeakedChunk {
  uptr chunk;
  u32 stack_trace_id;
  uptr leaked_size;
  ChunkTag tag;
};

using LeakedChunks = InternalMmapVector<LeakedChunk>;

struct Leak {
  u32 id;
  uptr hit_count;
  uptr total_size;
  u32 stack_trace_id;
  bool is_directly_leaked;
  bool is_suppressed;
};

struct LeakedObject {
  u32 leak_id;
  uptr addr;
  uptr size;
};

// Aggregates leaks by stack trace prefix.
class LeakReport {
 public:
  LeakReport() {}
  void AddLeakedChunks(const LeakedChunks &chunks);
  void ReportTopLeaks(uptr max_leaks);
  void PrintSummary();
  uptr ApplySuppressions();
  uptr UnsuppressedLeakCount();
  uptr IndirectUnsuppressedLeakCount();

 private:
  void PrintReportForLeak(uptr index);
  void PrintLeakedObjectsForLeak(uptr index);

  u32 next_id_ = 0;
  InternalMmapVector<Leak> leaks_;
  InternalMmapVector<LeakedObject> leaked_objects_;
};

typedef InternalMmapVector<uptr> Frontier;

// Platform-specific functions.
void InitializePlatformSpecificModules();
void ProcessGlobalRegions(Frontier *frontier);
void ProcessPlatformSpecificAllocations(Frontier *frontier);

// LockStuffAndStopTheWorld can start to use Scan* calls to collect into
// this Frontier vector before the StopTheWorldCallback actually runs.
// This is used when the OS has a unified callback API for suspending
// threads and enumerating roots.
struct CheckForLeaksParam {
  Frontier frontier;
  LeakedChunks leaks;
  tid_t caller_tid;
  uptr caller_sp;
  bool success = false;
};

using Region = Range;

bool HasRootRegions();
void ScanRootRegions(Frontier *frontier,
                     const InternalMmapVectorNoCtor<Region> &region);
// Run stoptheworld while holding any platform-specific locks, as well as the
// allocator and thread registry locks.
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
                              CheckForLeaksParam* argument);

void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag);
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
                          Frontier *frontier);
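
// A rough usage sketch (hypothetical; the real root-scanning code in the LSan
// implementation is more involved): scanning one suspended thread's stack and
// any extra stack ranges it registered.
//
//   Frontier frontier;
//   ScanRangeForPointers(stack_begin, stack_end, &frontier, "STACK",
//                        kReachable);
//   InternalMmapVector<Range> extra_ranges;
//   GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
//   ScanExtraStackRanges(extra_ranges, &frontier);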

// Functions called from the parent tool.
const char *MaybeCallLsanDefaultOptions();
void InitCommonLsan();
void DoLeakCheck();
void DoRecoverableLeakCheckVoid();
void DisableCounterUnderflow();
bool DisabledInThisThread();

// Used to implement __lsan::ScopedDisabler.
void DisableInThisThread();
void EnableInThisThread();
// Can be used to ignore memory allocated by an intercepted
// function.
struct ScopedInterceptorDisabler {
  ScopedInterceptorDisabler() { DisableInThisThread(); }
  ~ScopedInterceptorDisabler() { EnableInThisThread(); }
};
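
// A minimal usage sketch (hypothetical interceptor, for illustration only):
// allocations made while the disabler is alive are treated as ignored and are
// not reported as leaks.
//
//   INTERCEPTOR(char *, some_function, const char *arg) {
//     ScopedInterceptorDisabler disabler;
//     return REAL(some_function)(arg);  // any allocation here is ignored
//   }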

// According to the Itanium C++ ABI, an array cookie is a single word
// containing the size of the allocated array.
static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
                                           uptr addr) {
  return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
         *reinterpret_cast<uptr *>(chunk_beg) == 0;
}

// According to the ARM C++ ABI, an array cookie consists of two words:
// struct array_cookie {
//   std::size_t element_size; // element_size != 0
//   std::size_t element_count;
// };
static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
                                       uptr addr) {
  return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
         *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
}

// Special case for "new T[0]" where T is a type with a destructor.
// new T[0] allocates a cookie (one or two words) for the array size (0), so
// the returned pointer points at the end of the allocated chunk. The actual
// cookie layout varies between platforms according to their C++ ABI
// implementation.
inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
                                        uptr addr) {
#if defined(__arm__)
  return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
#else
  return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
#endif
}
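
// Illustration (sketch, Itanium ABI case): for
//   struct T { ~T(); };
//   T *p = new T[0];
// the allocation holds a one-word cookie with element count 0, and p points
// just past it, i.e. at chunk_beg + sizeof(uptr) == the end of the chunk.
// The helper above recognizes this layout so the chunk is still considered
// reachable through p.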

// Return the linker module, if valid for the platform.
LoadedModule *GetLinker();

// Return true if LSan has finished leak checking and reported leaks.
bool HasReportedLeaks();

// Run platform-specific leak handlers.
void HandleLeaks();

}  // namespace __lsan

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_options();

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off();

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions();

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *p, __lsan::uptr size);

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *p, __lsan::uptr size);

}  // extern "C"

#endif  // LSAN_COMMON_H