xref: /freebsd/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h (revision af23369a6deaaeb612ab266eb88b8bb8d560c322)
1 //=-- lsan_common.h -------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of LeakSanitizer.
10 // Private LSan header.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LSAN_COMMON_H
15 #define LSAN_COMMON_H
16 
17 #include "sanitizer_common/sanitizer_allocator.h"
18 #include "sanitizer_common/sanitizer_common.h"
19 #include "sanitizer_common/sanitizer_internal_defs.h"
20 #include "sanitizer_common/sanitizer_platform.h"
21 #include "sanitizer_common/sanitizer_stackdepot.h"
22 #include "sanitizer_common/sanitizer_stoptheworld.h"
23 #include "sanitizer_common/sanitizer_symbolizer.h"
24 
25 // LeakSanitizer relies on some Glibc's internals (e.g. TLS machinery) on Linux.
26 // Also, LSan doesn't like 32 bit architectures
27 // because of "small" (4 bytes) pointer size that leads to high false negative
28 // ratio on large leaks. But we still want to have it for some 32 bit arches
29 // (e.g. x86), see https://github.com/google/sanitizers/issues/403.
30 // To enable LeakSanitizer on a new architecture, one needs to implement the
31 // internal_clone function as well as (probably) adjust the TLS machinery for
32 // the new architecture inside the sanitizer library.
33 // Exclude leak-detection on arm32 for Android because `__aeabi_read_tp`
34 // is missing. This caused a link error.
#if SANITIZER_ANDROID && (__ANDROID_API__ < 28 || defined(__arm__))
// Android before API 28, or arm32 Android (missing `__aeabi_read_tp`, see
// the comment above): leak detection disabled.
#  define CAN_SANITIZE_LEAKS 0
#elif (SANITIZER_LINUX || SANITIZER_APPLE) && (SANITIZER_WORDSIZE == 64) && \
    (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) ||  \
     defined(__powerpc64__) || defined(__s390x__))
// 64-bit Linux/Apple on the explicitly supported ISAs.
#  define CAN_SANITIZE_LEAKS 1
#elif defined(__i386__) && (SANITIZER_LINUX || SANITIZER_APPLE)
// 32-bit x86 is supported despite the false-negative risk described above.
#  define CAN_SANITIZE_LEAKS 1
#elif defined(__arm__) && SANITIZER_LINUX
#  define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_RISCV64 && SANITIZER_LINUX
#  define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_NETBSD || SANITIZER_FUCHSIA
#  define CAN_SANITIZE_LEAKS 1
#else
// Any other platform/architecture combination: leak detection unavailable.
#  define CAN_SANITIZE_LEAKS 0
#endif
52 
// Forward declarations of sanitizer_common types referenced by the
// declarations below; avoids pulling in their full headers here.
namespace __sanitizer {
class FlagParser;
class ThreadRegistry;
class ThreadContextBase;
struct DTLS;
}  // namespace __sanitizer
59 
60 namespace __lsan {
61 
// Chunk tags. Each allocator chunk carries one; the leak check reads and
// updates it through LsanMetadata::tag()/set_tag() (declared below).
enum ChunkTag {
  kDirectlyLeaked = 0,  // default
  kIndirectlyLeaked = 1,
  kReachable = 2,
  kIgnored = 3  // see IgnoreObjectLocked() / __lsan_ignore_object()
};
69 
// LSan runtime flags. The member list is generated from lsan_flags.inc via
// the LSAN_FLAG X-macro, so every flag declared there becomes a field here.
struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
#undef LSAN_FLAG

  // Sets every flag to its DefaultValue (defined out of line).
  void SetDefaults();
  // Granularity used when scanning memory for pointer-like values:
  // 1 byte when use_unaligned is set, otherwise natural pointer alignment.
  uptr pointer_alignment() const {
    return use_unaligned ? 1 : sizeof(uptr);
  }
};
80 
// Global flag storage; access it through flags().
extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
// Registers LSan's flags with the given parser, backed by `f`.
void RegisterLsanFlags(FlagParser *parser, Flags *f);
84 
// One leaked chunk as collected during the leak check; consumed by
// LeakReport::AddLeakedChunks() below.
struct LeakedChunk {
  uptr chunk;          // chunk address
  u32 stack_trace_id;  // allocation stack (StackDepot id)
  uptr leaked_size;
  ChunkTag tag;  // NOTE(review): presumably kDirectlyLeaked or
                 // kIndirectlyLeaked only -- confirm in lsan_common.cpp
};

// Growable list of leaked chunks (internal mmap-based vector).
using LeakedChunks = InternalMmapVector<LeakedChunk>;
93 
// An aggregated leak record: chunks are grouped by stack trace
// (see LeakReport, which performs the aggregation).
struct Leak {
  u32 id;                   // unique id within a LeakReport
  uptr hit_count;           // number of chunks folded into this record
  uptr total_size;          // combined leaked size of those chunks
  u32 stack_trace_id;       // common allocation stack (StackDepot id)
  bool is_directly_leaked;  // direct vs. indirect leak classification
  bool is_suppressed;       // matched a suppression (see ApplySuppressions)
};
102 
// A single leaked object, linked to its aggregated record by leak_id.
struct LeakedObject {
  u32 leak_id;  // matches Leak::id
  uptr addr;
  uptr size;
};
108 
// Aggregates leaks by stack trace prefix.
class LeakReport {
 public:
  LeakReport() {}
  // Merges the given raw chunks into the aggregated Leak records.
  void AddLeakedChunks(const LeakedChunks &chunks);
  // Prints detailed reports for up to `max_leaks` leaks.
  void ReportTopLeaks(uptr max_leaks);
  // Prints the one-line leak summary.
  void PrintSummary();
  // Applies suppression rules to the collected leaks.
  // NOTE(review): return value presumably counts suppressed bytes or leaks --
  // confirm against the implementation.
  uptr ApplySuppressions();
  uptr UnsuppressedLeakCount();
  uptr IndirectUnsuppressedLeakCount();

 private:
  void PrintReportForLeak(uptr index);
  void PrintLeakedObjectsForLeak(uptr index);

  u32 next_id_ = 0;  // id to assign to the next new Leak
  InternalMmapVector<Leak> leaks_;
  InternalMmapVector<LeakedObject> leaked_objects_;
};
128 
// Work list of addresses still to be scanned for pointers (see the Scan*
// functions below, which collect into it).
typedef InternalMmapVector<uptr> Frontier;

// Platform-specific functions.
void InitializePlatformSpecificModules();
// Adds pointers found in global regions to the frontier.
void ProcessGlobalRegions(Frontier *frontier);
// Handles allocations that only the platform layer knows about.
void ProcessPlatformSpecificAllocations(Frontier *frontier);
135 
// A user-registered memory region treated as a root set during scanning
// (see __lsan_register_root_region at the bottom of this header).
struct RootRegion {
  uptr begin;
  uptr size;
};
140 
// LockStuffAndStopTheWorld can start to use Scan* calls to collect into
// this Frontier vector before the StopTheWorldCallback actually runs.
// This is used when the OS has a unified callback API for suspending
// threads and enumerating roots.
struct CheckForLeaksParam {
  Frontier frontier;     // scan work list (may be pre-populated, see above)
  LeakedChunks leaks;    // chunks determined to be leaked
  bool success = false;  // NOTE(review): presumably set by the callback once
                         // the check completes -- confirm in lsan_common.cpp
};
150 
// Returns the set of user-registered root regions.
InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions();
// Scans `region` within the [region_begin, region_end) window; is_readable
// indicates whether the memory may be dereferenced.
void ScanRootRegion(Frontier *frontier, RootRegion const &region,
                    uptr region_begin, uptr region_end, bool is_readable);
// Callback form usable with ForEachExtraStackRange(); `arg` is opaque.
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg);
void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs);
// Run stoptheworld while holding any platform-specific locks, as well as the
// allocator and thread registry locks.
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
                              CheckForLeaksParam* argument);

// Scans [begin, end) for pointer-like values; region_type names the region
// (NOTE(review): presumably for verbose reporting -- confirm) and `tag` is
// applied to chunks found reachable from it.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag);
// Scans a global region [begin, end) for pointers.
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
165 
// Result of IgnoreObjectLocked() (the helper behind __lsan_ignore_object()).
enum IgnoreObjectResult {
  kIgnoreObjectSuccess,
  kIgnoreObjectAlreadyIgnored,
  kIgnoreObjectInvalid
};
171 
// Functions called from the parent tool.
// Invokes the user's weak __lsan_default_options() hook, if defined.
const char *MaybeCallLsanDefaultOptions();
void InitCommonLsan();
void DoLeakCheck();
// Leak-check variant that lets the process continue afterwards.
void DoRecoverableLeakCheckVoid();
void DisableCounterUnderflow();
// True if leak detection is currently disabled in this thread
// (see DisableInThisThread/EnableInThisThread below).
bool DisabledInThisThread();

// Used to implement __lsan::ScopedDisabler.
void DisableInThisThread();
void EnableInThisThread();
// Can be used to ignore memory allocated by an intercepted
// function. RAII guard: leak detection is disabled in this thread for the
// guard's lifetime (DisableInThisThread() on construction,
// EnableInThisThread() on destruction).
struct ScopedInterceptorDisabler {
  ScopedInterceptorDisabler() { DisableInThisThread(); }
  ~ScopedInterceptorDisabler() { EnableInThisThread(); }
};
189 
190 // According to Itanium C++ ABI array cookie is a one word containing
191 // size of allocated array.
192 static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
193                                            uptr addr) {
194   return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
195          *reinterpret_cast<uptr *>(chunk_beg) == 0;
196 }
197 
198 // According to ARM C++ ABI array cookie consists of two words:
199 // struct array_cookie {
200 //   std::size_t element_size; // element_size != 0
201 //   std::size_t element_count;
202 // };
203 static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
204                                        uptr addr) {
205   return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
206          *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
207 }
208 
209 // Special case for "new T[0]" where T is a type with DTOR.
210 // new T[0] will allocate a cookie (one or two words) for the array size (0)
211 // and store a pointer to the end of allocated chunk. The actual cookie layout
212 // varies between platforms according to their C++ ABI implementation.
213 inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
214                                         uptr addr) {
215 #if defined(__arm__)
216   return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
217 #else
218   return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
219 #endif
220 }
221 
// The following must be implemented in the parent tool.

// Invokes `callback` with `arg` for every allocator chunk.
void ForEachChunk(ForEachChunkCallback callback, void *arg);
// Returns the address range occupied by the global allocator object.
void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
void UnlockAllocator();
// Returns true if [addr, addr + sizeof(void *)) is poisoned.
bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access. Thread-safety analysis is suppressed
// because the lock/unlock pair is split across separate functions.
void LockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
void UnlockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
235 
// RAII guard acquiring the locks that stop-the-world must run under:
// thread registry first, then allocator; released in reverse order.
struct ScopedStopTheWorldLock {
  ScopedStopTheWorldLock() {
    LockThreadRegistry();
    LockAllocator();
  }

  ~ScopedStopTheWorldLock() {
    // Unlock in reverse acquisition order.
    UnlockAllocator();
    UnlockThreadRegistry();
  }

  // Non-copyable: copying a lock guard would cause a double unlock.
  ScopedStopTheWorldLock &operator=(const ScopedStopTheWorldLock &) = delete;
  ScopedStopTheWorldLock(const ScopedStopTheWorldLock &) = delete;
};
250 
// Returns the thread registry; "Locked" suffix indicates the caller must
// already hold the thread registry lock (see LockThreadRegistry above).
ThreadRegistry *GetThreadRegistryLocked();
// Fills in the stack, TLS, allocator-cache ranges and DTLS of the thread
// with the given OS thread id.
// NOTE(review): return value presumably indicates whether the thread was
// found -- confirm against the parent tool's implementation.
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls);
// Collects the addresses of all per-thread allocator caches.
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches);
// Invokes `callback` with `arg` for each extra stack range of thread `os_id`.
void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                            void *arg);
// If called from the main thread, updates the main thread's TID in the thread
// registry. We need this to handle processes that fork() without a subsequent
// exec(), which invalidates the recorded TID. To update it, we must call
// gettid() from the main thread. Our solution is to call this function before
// leak checking and also before every call to pthread_create() (to handle cases
// where leak checking is initiated from a non-main thread).
void EnsureMainThreadIDIsCorrect();
// If p points into a chunk that has been allocated to the user, returns its
// user-visible address. Otherwise, returns 0.
uptr PointsIntoChunk(void *p);
// Returns address of user-visible chunk contained in this allocator chunk.
uptr GetUserBegin(uptr chunk);
// Helper for __lsan_ignore_object().
IgnoreObjectResult IgnoreObjectLocked(const void *p);

// Return the linker module, if valid for the platform.
LoadedModule *GetLinker();

// Return true if LSan has finished leak checking and reported leaks.
bool HasReportedLeaks();

// Run platform-specific leak handlers.
void HandleLeaks();
281 
// Wrapper for chunk metadata operations. Implemented by the parent tool;
// metadata_ is an opaque pointer into its allocator metadata.
class LsanMetadata {
 public:
  // Constructor accepts address of user-visible chunk.
  explicit LsanMetadata(uptr chunk);
  // Whether the chunk is currently allocated to the user.
  bool allocated() const;
  // Read/write the chunk's ChunkTag classification.
  ChunkTag tag() const;
  void set_tag(ChunkTag value);
  // NOTE(review): presumably the user-requested allocation size (not the
  // rounded-up usable size) -- confirm in the parent tool.
  uptr requested_size() const;
  // Allocation stack trace id (StackDepot).
  u32 stack_trace_id() const;
 private:
  void *metadata_;
};
295 
296 }  // namespace __lsan
297 
extern "C" {
// Weak hook: the user program may define this to supply default LSan
// options (overridden by the LSAN_OPTIONS environment variable --
// NOTE(review): precedence per public LSan docs, confirm).
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_options();

// Weak hook: a nonzero return disables leak checking (public LSan API).
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off();

// Weak hook: extra suppression rules supplied by the user program.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions();

// Registers [p, p + size) as a root region (see RootRegion above).
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *p, __lsan::uptr size);

// Removes a region previously registered with __lsan_register_root_region.
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *p, __lsan::uptr size);

}  // extern "C"
315 
316 #endif  // LSAN_COMMON_H
317