//=-- lsan_common.h -------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Private LSan header.
//
//===----------------------------------------------------------------------===//

#ifndef LSAN_COMMON_H
#define LSAN_COMMON_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "sanitizer_common/sanitizer_thread_registry.h"

// LeakSanitizer relies on some of glibc's internals (e.g. the TLS machinery)
// on Linux. LSan is also a poor fit for 32-bit architectures, because the
// small (4-byte) pointer size leads to a high false-negative ratio on large
// leaks. We still want to support it on some 32-bit arches (e.g. x86), see
// https://github.com/google/sanitizers/issues/403.
// To enable LeakSanitizer on a new architecture, one needs to implement the
// internal_clone function and (probably) adjust the TLS machinery for the new
// architecture inside the sanitizer library.
// Leak detection is excluded on arm32 for Android because `__aeabi_read_tp`
// is missing there, which caused a link error.
#if SANITIZER_ANDROID && (__ANDROID_API__ < 28 || defined(__arm__))
#  define CAN_SANITIZE_LEAKS 0
#elif (SANITIZER_LINUX || SANITIZER_APPLE) && (SANITIZER_WORDSIZE == 64) && \
    (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) ||  \
     defined(__powerpc64__) || defined(__s390x__))
#  define CAN_SANITIZE_LEAKS 1
#elif defined(__i386__) && (SANITIZER_LINUX || SANITIZER_APPLE)
#  define CAN_SANITIZE_LEAKS 1
#elif defined(__arm__) && SANITIZER_LINUX
#  define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_LOONGARCH64 && SANITIZER_LINUX
#  define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_RISCV64 && SANITIZER_LINUX
#  define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_NETBSD || SANITIZER_FUCHSIA
#  define CAN_SANITIZE_LEAKS 1
#else
#  define CAN_SANITIZE_LEAKS 0
#endif

namespace __sanitizer {
class FlagParser;
class ThreadRegistry;
class ThreadContextBase;
struct DTLS;
}

// This section declares functions and classes that must be implemented by the
// parent tool linking in LSan. The LSan library provides implementations that
// are linked in when LSan is used as a standalone tool.
namespace __lsan {

// Chunk tags.
enum ChunkTag {
  kDirectlyLeaked = 0,  // default
  kIndirectlyLeaked = 1,
  kReachable = 2,
  kIgnored = 3
};

enum IgnoreObjectResult {
  kIgnoreObjectSuccess,
  kIgnoreObjectAlreadyIgnored,
  kIgnoreObjectInvalid
};

struct Range {
  uptr begin;
  uptr end;
};

//// --------------------------------------------------------------------------
//// Poisoning prototypes.
//// --------------------------------------------------------------------------

// Returns true if [addr, addr + sizeof(void *)) is poisoned.
bool WordIsPoisoned(uptr addr);

//// --------------------------------------------------------------------------
//// Thread prototypes.
//// --------------------------------------------------------------------------

// Wrappers for ThreadRegistry access.
void LockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
void UnlockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
// If called from the main thread, updates the main thread's TID in the thread
// registry. We need this to handle processes that fork() without a subsequent
// exec(), which invalidates the recorded TID. To update it, we must call
// gettid() from the main thread. Our solution is to call this function before
// leak checking and also before every call to pthread_create() (to handle cases
// where leak checking is initiated from a non-main thread).
void EnsureMainThreadIDIsCorrect();
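
// Illustrative sketch (not part of this header): a thread-creation wrapper, as
// an interceptor might use it, that refreshes the recorded main-thread TID per
// the comment above. The wrapper name is hypothetical; assumes <pthread.h>.
//
//   int CreateThreadWrapper(pthread_t *th, const pthread_attr_t *attr,
//                           void *(*start)(void *), void *arg) {
//     EnsureMainThreadIDIsCorrect();  // refresh TID after fork() without exec()
//     return pthread_create(th, attr, start, arg);
//   }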

bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls);
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches);
void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges);
void GetThreadExtraStackRangesLocked(tid_t os_id,
                                     InternalMmapVector<Range> *ranges);
void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs);
void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads);

//// --------------------------------------------------------------------------
//// Allocator prototypes.
//// --------------------------------------------------------------------------

// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
void UnlockAllocator();

// Returns the address range occupied by the global allocator object.
void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// If p points into a chunk that has been allocated to the user, returns its
// user-visible address. Otherwise, returns 0.
uptr PointsIntoChunk(void *p);
// Returns address of user-visible chunk contained in this allocator chunk.
uptr GetUserBegin(uptr chunk);

// Wrapper for chunk metadata operations.
class LsanMetadata {
 public:
  // Constructor accepts address of user-visible chunk.
  explicit LsanMetadata(uptr chunk);
  bool allocated() const;
  ChunkTag tag() const;
  void set_tag(ChunkTag value);
  uptr requested_size() const;
  u32 stack_trace_id() const;

 private:
  void *metadata_;
};

// Iterate over all existing chunks. Allocator must be locked.
void ForEachChunk(ForEachChunkCallback callback, void *arg);
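
// Illustrative sketch (not part of this header): a ForEachChunk callback that
// marks every live, not-yet-ignored chunk as reachable, assuming the
// ForEachChunkCallback signature void(uptr chunk, void *arg) from
// sanitizer_allocator.h. The callback name is hypothetical.
//
//   static void MarkAllReachableCb(uptr chunk, void *arg) {
//     (void)arg;
//     chunk = GetUserBegin(chunk);  // translate to the user-visible address
//     LsanMetadata m(chunk);
//     if (m.allocated() && m.tag() != kIgnored)
//       m.set_tag(kReachable);
//   }
//
//   // With the allocator locked:
//   //   ForEachChunk(MarkAllReachableCb, nullptr);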

// Helper for __lsan_ignore_object().
IgnoreObjectResult IgnoreObjectLocked(const void *p);

// The rest of the LSan interface, which is implemented by the LSan library.

struct ScopedStopTheWorldLock {
  ScopedStopTheWorldLock() {
    LockThreadRegistry();
    LockAllocator();
  }

  ~ScopedStopTheWorldLock() {
    UnlockAllocator();
    UnlockThreadRegistry();
  }

  ScopedStopTheWorldLock &operator=(const ScopedStopTheWorldLock &) = delete;
  ScopedStopTheWorldLock(const ScopedStopTheWorldLock &) = delete;
};
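
// Illustrative sketch (not part of this header): ScopedStopTheWorldLock is a
// plain RAII guard, so a leak-check driver can hold both locks for exactly one
// scope and have them released in reverse order automatically. The function
// below is hypothetical.
//
//   void RunCheckUnderLocks(StopTheWorldCallback callback,
//                           CheckForLeaksParam *param) {
//     ScopedStopTheWorldLock lock;  // thread registry locked, then allocator
//     // ... suspend threads and invoke |callback| with |param| ...
//   }  // allocator unlocked first, then the thread registry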

struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
#undef LSAN_FLAG

  void SetDefaults();
  uptr pointer_alignment() const {
    return use_unaligned ? 1 : sizeof(uptr);
  }
};
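
// How the X-macro above works: every LSAN_FLAG(...) entry in lsan_flags.inc
// expands to a plain field of struct Flags. For example, an entry of the form
// below (the exact default value and description live in lsan_flags.inc)
//
//   LSAN_FLAG(bool, use_unaligned, false, "Consider unaligned pointers valid.")
//
// expands to just
//
//   bool use_unaligned;
//
// which is the field that pointer_alignment() reads above.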

extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
void RegisterLsanFlags(FlagParser *parser, Flags *f);

struct LeakedChunk {
  uptr chunk;
  u32 stack_trace_id;
  uptr leaked_size;
  ChunkTag tag;
};

using LeakedChunks = InternalMmapVector<LeakedChunk>;

struct Leak {
  u32 id;
  uptr hit_count;
  uptr total_size;
  u32 stack_trace_id;
  bool is_directly_leaked;
  bool is_suppressed;
};

struct LeakedObject {
  u32 leak_id;
  uptr addr;
  uptr size;
};

// Aggregates leaks by stack trace prefix.
class LeakReport {
 public:
  LeakReport() {}
  void AddLeakedChunks(const LeakedChunks &chunks);
  void ReportTopLeaks(uptr max_leaks);
  void PrintSummary();
  uptr ApplySuppressions();
  uptr UnsuppressedLeakCount();
  uptr IndirectUnsuppressedLeakCount();

 private:
  void PrintReportForLeak(uptr index);
  void PrintLeakedObjectsForLeak(uptr index);

  u32 next_id_ = 0;
  InternalMmapVector<Leak> leaks_;
  InternalMmapVector<LeakedObject> leaked_objects_;
};

typedef InternalMmapVector<uptr> Frontier;

// Platform-specific functions.
void InitializePlatformSpecificModules();
void ProcessGlobalRegions(Frontier *frontier);
void ProcessPlatformSpecificAllocations(Frontier *frontier);

struct RootRegion {
  uptr begin;
  uptr size;
};

// LockStuffAndStopTheWorld may start using Scan* calls to collect roots into
// this Frontier vector before the StopTheWorldCallback actually runs.
// This is used when the OS has a unified callback API for suspending
// threads and enumerating roots.
struct CheckForLeaksParam {
  Frontier frontier;
  LeakedChunks leaks;
  tid_t caller_tid;
  uptr caller_sp;
  bool success = false;
};

InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions();
void ScanRootRegion(Frontier *frontier, RootRegion const &region,
                    uptr region_begin, uptr region_end, bool is_readable);
// Run stoptheworld while holding any platform-specific locks, as well as the
// allocator and thread registry locks.
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
                              CheckForLeaksParam* argument);

void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag);
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
                          Frontier *frontier);

// Functions called from the parent tool.
const char *MaybeCallLsanDefaultOptions();
void InitCommonLsan();
void DoLeakCheck();
void DoRecoverableLeakCheckVoid();
void DisableCounterUnderflow();
bool DisabledInThisThread();

// Used to implement __lsan::ScopedDisabler.
void DisableInThisThread();
void EnableInThisThread();
// Can be used to ignore memory allocated by an intercepted
// function.
struct ScopedInterceptorDisabler {
  ScopedInterceptorDisabler() { DisableInThisThread(); }
  ~ScopedInterceptorDisabler() { EnableInThisThread(); }
};
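
// Illustrative sketch (not part of this header): an interceptor can use
// ScopedInterceptorDisabler so that heap chunks allocated while the disabler
// is live are tagged kIgnored and never reported as leaks. The intercepted
// function below is hypothetical.
//
//   void CallLibraryInitIgnoringItsAllocations() {
//     ScopedInterceptorDisabler disabler;
//     some_library_init();  // hypothetical call that allocates internally
//   }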

// According to the Itanium C++ ABI, the array cookie is a single word holding
// the number of elements in the allocated array.
static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
                                           uptr addr) {
  return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
         *reinterpret_cast<uptr *>(chunk_beg) == 0;
}

// According to the ARM C++ ABI, the array cookie consists of two words:
// struct array_cookie {
//   std::size_t element_size; // element_size != 0
//   std::size_t element_count;
// };
static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
                                       uptr addr) {
  return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
         *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
}

// Special case for "new T[0]" where T is a type with a destructor.
// new T[0] allocates a cookie (one or two words) holding the array size (0),
// and the pointer returned to the program points to the end of the allocated
// chunk. The actual cookie layout varies between platforms according to their
// C++ ABI implementation.
inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
                                        uptr addr) {
#if defined(__arm__)
  return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
#else
  return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
#endif
}
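
// Worked example (illustrative): on an LP64 Itanium-ABI target, "new T[0]" for
// a T with a destructor and default alignment allocates an 8-byte chunk that
// holds the element count 0, and the pointer handed back to the program is
// chunk_beg + 8, i.e. one past the end of the chunk. For that pointer,
//
//   IsItaniumABIArrayCookie(chunk_beg, /*chunk_size=*/8, /*addr=*/chunk_beg + 8)
//
// returns true, the intent being that such a past-the-end pointer still keeps
// the cookie allocation reachable instead of reporting it as a leak.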

// Return the linker module, if valid for the platform.
LoadedModule *GetLinker();

// Return true if LSan has finished leak checking and reported leaks.
bool HasReportedLeaks();

// Run platform-specific leak handlers.
void HandleLeaks();

}  // namespace __lsan

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_options();

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off();

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions();
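
// Illustrative sketch (not part of this header): the three hooks above are
// weak, so a program built with LSan may define them to tune leak checking at
// startup, along the lines of:
//
//   extern "C" const char *__lsan_default_options() {
//     return "print_suppressions=0";
//   }
//
//   extern "C" const char *__lsan_default_suppressions() {
//     return "leak:libexample.so\n";  // the library name is hypothetical
//   }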

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *p, __lsan::uptr size);

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *p, __lsan::uptr size);
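
// Illustrative sketch (not part of this header): user code can mark a custom
// memory region (e.g. an mmap'ed arena LSan would not otherwise scan) as a
// root, and drop it again before unmapping. The arena variables are
// hypothetical; assumes <sys/mman.h>.
//
//   void *arena = mmap(nullptr, arena_size, PROT_READ | PROT_WRITE,
//                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
//   __lsan_register_root_region(arena, arena_size);
//   // ... pointers stored in the arena keep their targets reachable ...
//   __lsan_unregister_root_region(arena, arena_size);
//   munmap(arena, arena_size);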

}  // extern "C"

#endif  // LSAN_COMMON_H