xref: /freebsd/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h (revision 5e801ac66d24704442eba426ed13c3effb8a34e7)
1 //=-- lsan_common.h -------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of LeakSanitizer.
10 // Private LSan header.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LSAN_COMMON_H
15 #define LSAN_COMMON_H
16 
17 #include "sanitizer_common/sanitizer_allocator.h"
18 #include "sanitizer_common/sanitizer_common.h"
19 #include "sanitizer_common/sanitizer_internal_defs.h"
20 #include "sanitizer_common/sanitizer_platform.h"
21 #include "sanitizer_common/sanitizer_stackdepot.h"
22 #include "sanitizer_common/sanitizer_stoptheworld.h"
23 #include "sanitizer_common/sanitizer_symbolizer.h"
24 
25 // LeakSanitizer relies on some Glibc's internals (e.g. TLS machinery) on Linux.
26 // Also, LSan doesn't like 32 bit architectures
27 // because of "small" (4 bytes) pointer size that leads to high false negative
28 // ratio on large leaks. But we still want to have it for some 32 bit arches
29 // (e.g. x86), see https://github.com/google/sanitizers/issues/403.
30 // To enable LeakSanitizer on a new architecture, one needs to implement the
31 // internal_clone function as well as (probably) adjust the TLS machinery for
32 // the new architecture inside the sanitizer library.
33 // Exclude leak-detection on arm32 for Android because `__aeabi_read_tp`
34 // is missing. This caused a link error.
35 #if SANITIZER_ANDROID && (__ANDROID_API__ < 28 || defined(__arm__))
36 #define CAN_SANITIZE_LEAKS 0
37 #elif (SANITIZER_LINUX || SANITIZER_MAC) && (SANITIZER_WORDSIZE == 64) && \
38     (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) ||  \
39      defined(__powerpc64__) || defined(__s390x__))
40 #define CAN_SANITIZE_LEAKS 1
41 #elif defined(__i386__) && (SANITIZER_LINUX || SANITIZER_MAC)
42 #define CAN_SANITIZE_LEAKS 1
43 #elif defined(__arm__) && SANITIZER_LINUX
44 #define CAN_SANITIZE_LEAKS 1
45 #elif SANITIZER_RISCV64 && SANITIZER_LINUX
46 #define CAN_SANITIZE_LEAKS 1
47 #elif SANITIZER_NETBSD || SANITIZER_FUCHSIA
48 #define CAN_SANITIZE_LEAKS 1
49 #else
50 #define CAN_SANITIZE_LEAKS 0
51 #endif
52 
53 namespace __sanitizer {
54 class FlagParser;
55 class ThreadRegistry;
56 class ThreadContextBase;
57 struct DTLS;
58 }
59 
60 namespace __lsan {
61 
62 // Chunk tags.
enum ChunkTag {
  kDirectlyLeaked = 0,  // default; not reachable from any root
  kIndirectlyLeaked = 1,  // reachable only through other leaked chunks
  kReachable = 2,         // reachable from a root in the last scan
  kIgnored = 3            // excluded from reporting (see IgnoreObjectLocked)
};
69 
// Runtime flags: one data member per LSAN_FLAG entry in lsan_flags.inc,
// generated via the X-macro below.
struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
#undef LSAN_FLAG

  // Resets every flag to the DefaultValue given in lsan_flags.inc.
  void SetDefaults();
  // Granularity used when scanning memory for pointer-like values:
  // byte-granular if use_unaligned is set, otherwise one machine word.
  uptr pointer_alignment() const {
    return use_unaligned ? 1 : sizeof(uptr);
  }
};
80 
// Global flag storage; always access through flags().
extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
// Registers all LSan flags with the given parser so parsed values land in *f.
void RegisterLsanFlags(FlagParser *parser, Flags *f);
84 
// One aggregated leak: leaked chunks sharing a stack trace prefix are
// folded into a single Leak record (see LeakReport::AddLeakedChunk).
struct Leak {
  u32 id;                   // unique id assigned by LeakReport
  uptr hit_count;           // number of chunks folded into this leak
  uptr total_size;          // combined size of those chunks, in bytes
  u32 stack_trace_id;       // allocation stack id in the stack depot
  bool is_directly_leaked;  // chunk tag was kDirectlyLeaked
  bool is_suppressed;       // matched a suppression (see ApplySuppressions)
};
93 
// One individual leaked allocation, linked to its aggregate Leak by id.
struct LeakedObject {
  u32 leak_id;  // id of the Leak this object belongs to
  uptr addr;    // user-visible address of the leaked chunk
  uptr size;    // leaked size, in bytes
};
99 
100 // Aggregates leaks by stack trace prefix.
class LeakReport {
 public:
  LeakReport() {}
  // Records one leaked chunk, folding it into an existing Leak with the
  // same stack trace id / tag or creating a new one.
  void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
                      ChunkTag tag);
  // Prints a detailed report for (up to) the max_leaks largest leaks.
  void ReportTopLeaks(uptr max_leaks);
  // Prints the one-line leak totals summary.
  void PrintSummary();
  // Matches recorded leaks against suppression rules, marking hits as
  // suppressed; returns a count (presumably of suppressed leaks -- confirm
  // against the definition in lsan_common.cpp).
  uptr ApplySuppressions();
  // Number of leaks not marked suppressed.
  uptr UnsuppressedLeakCount();
  // Number of unsuppressed leaks that are indirect (kIndirectlyLeaked).
  uptr IndirectUnsuppressedLeakCount();

 private:
  void PrintReportForLeak(uptr index);
  void PrintLeakedObjectsForLeak(uptr index);

  u32 next_id_ = 0;                              // next Leak::id to hand out
  InternalMmapVector<Leak> leaks_;               // aggregated leaks
  InternalMmapVector<LeakedObject> leaked_objects_;  // individual objects
};
120 
121 typedef InternalMmapVector<uptr> Frontier;
122 
123 // Platform-specific functions.
124 void InitializePlatformSpecificModules();
125 void ProcessGlobalRegions(Frontier *frontier);
126 void ProcessPlatformSpecificAllocations(Frontier *frontier);
127 
// A user-registered memory region whose contents are treated as live
// pointers during the scan (see __lsan_register_root_region).
struct RootRegion {
  uptr begin;  // start address of the region
  uptr size;   // region size, in bytes
};
132 
133 // LockStuffAndStopTheWorld can start to use Scan* calls to collect into
134 // this Frontier vector before the StopTheWorldCallback actually runs.
135 // This is used when the OS has a unified callback API for suspending
136 // threads and enumerating roots.
struct CheckForLeaksParam {
  Frontier frontier;       // scan work list; may be pre-populated (see above)
  LeakReport leak_report;  // receives the aggregated leaks
  bool success = false;    // NOTE(review): presumably set by the callback once
                           // the check completes -- confirm in lsan_common.cpp
};
142 
// Returns the set of regions registered via __lsan_register_root_region.
InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions();
// Scans the part of [region_begin, region_end) that overlaps the given root
// region; is_readable indicates whether the memory can be dereferenced.
void ScanRootRegion(Frontier *frontier, RootRegion const &region,
                    uptr region_begin, uptr region_end, bool is_readable);
// RangeIteratorCallback adapter for extra stack ranges; arg carries the
// Frontier (presumably -- see the implementation) to scan [begin, end) into.
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg);
void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs);
// Run stoptheworld while holding any platform-specific locks, as well as the
// allocator and thread registry locks.
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
                              CheckForLeaksParam* argument);

// Scans [begin, end) for pointer-like values; region_type names the region
// for logging, and tag is the ChunkTag applied to chunks found reachable.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag);
// Scans a global region, honoring platform/global-specific exclusions.
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
157 
// Outcome of IgnoreObjectLocked() / __lsan_ignore_object().
enum IgnoreObjectResult {
  kIgnoreObjectSuccess,         // chunk found and newly marked kIgnored
  kIgnoreObjectAlreadyIgnored,  // chunk was already marked kIgnored
  kIgnoreObjectInvalid          // pointer does not address a live chunk
};
163 
// Functions called from the parent tool.

// Returns the user's __lsan_default_options() result when the weak symbol is
// defined -- presumably an empty string otherwise; confirm in the definition.
const char *MaybeCallLsanDefaultOptions();
// One-time initialization of the common leak-checking machinery.
void InitCommonLsan();
// Runs the leak check; behavior on detected leaks is flag-controlled.
void DoLeakCheck();
// Leak-check variant that does not abort the process on leaks.
void DoRecoverableLeakCheckVoid();
// Handles an EnableInThisThread() without a matching Disable (counter
// underflow) -- see ScopedInterceptorDisabler below.
void DisableCounterUnderflow();
// True while leak detection is disabled in the current thread.
bool DisabledInThisThread();

// Used to implement __lsan::ScopedDisabler.
void DisableInThisThread();
void EnableInThisThread();
175 // Can be used to ignore memory allocated by an intercepted
176 // function.
177 struct ScopedInterceptorDisabler {
178   ScopedInterceptorDisabler() { DisableInThisThread(); }
179   ~ScopedInterceptorDisabler() { EnableInThisThread(); }
180 };
181 
182 // According to Itanium C++ ABI array cookie is a one word containing
183 // size of allocated array.
184 static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
185                                            uptr addr) {
186   return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
187          *reinterpret_cast<uptr *>(chunk_beg) == 0;
188 }
189 
190 // According to ARM C++ ABI array cookie consists of two words:
191 // struct array_cookie {
192 //   std::size_t element_size; // element_size != 0
193 //   std::size_t element_count;
194 // };
195 static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
196                                        uptr addr) {
197   return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
198          *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
199 }
200 
201 // Special case for "new T[0]" where T is a type with DTOR.
202 // new T[0] will allocate a cookie (one or two words) for the array size (0)
203 // and store a pointer to the end of allocated chunk. The actual cookie layout
204 // varies between platforms according to their C++ ABI implementation.
inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
                                        uptr addr) {
#if defined(__arm__)
  // ARM C++ ABI: two-word cookie {element_size, element_count}.
  return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
#else
  // All other supported targets: one-word Itanium C++ ABI cookie.
  return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
#endif
}
213 
214 // The following must be implemented in the parent tool.
215 
// Invokes callback on every allocator chunk.
void ForEachChunk(ForEachChunkCallback callback, void *arg);
// Returns the address range occupied by the global allocator object.
void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
void UnlockAllocator();
// Returns true if [addr, addr + sizeof(void *)) is poisoned.
bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
void LockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
void UnlockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
ThreadRegistry *GetThreadRegistryLocked();
// Fills in the stack, TLS, and allocator-cache ranges of the thread with the
// given OS thread id; the return value presumably signals whether the thread
// was found -- confirm against the parent tool's implementation.
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls);
// Collects pointers to all per-thread allocator caches.
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches);
// Invokes callback for each additional stack range of the given thread.
void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                            void *arg);
// If called from the main thread, updates the main thread's TID in the thread
// registry. We need this to handle processes that fork() without a subsequent
// exec(), which invalidates the recorded TID. To update it, we must call
// gettid() from the main thread. Our solution is to call this function before
// leak checking and also before every call to pthread_create() (to handle cases
// where leak checking is initiated from a non-main thread).
void EnsureMainThreadIDIsCorrect();
// If p points into a chunk that has been allocated to the user, returns its
// user-visible address. Otherwise, returns 0.
uptr PointsIntoChunk(void *p);
// Returns address of user-visible chunk contained in this allocator chunk.
uptr GetUserBegin(uptr chunk);
// Helper for __lsan_ignore_object().
IgnoreObjectResult IgnoreObjectLocked(const void *p);

// Return the linker module, if valid for the platform.
LoadedModule *GetLinker();

// Return true if LSan has finished leak checking and reported leaks.
bool HasReportedLeaks();

// Run platform-specific leak handlers.
void HandleLeaks();
257 
258 // Wrapper for chunk metadata operations.
class LsanMetadata {
 public:
  // Constructor accepts address of user-visible chunk.
  explicit LsanMetadata(uptr chunk);
  // True if the chunk is currently allocated (not freed).
  bool allocated() const;
  // Leak-classification tag of the chunk (see ChunkTag).
  ChunkTag tag() const;
  void set_tag(ChunkTag value);
  // Size originally requested by the user, in bytes.
  uptr requested_size() const;
  // Allocation stack trace id in the stack depot.
  u32 stack_trace_id() const;
 private:
  void *metadata_;  // opaque pointer into the parent tool's chunk metadata
};
271 
272 }  // namespace __lsan
273 
extern "C" {
// User-overridable (weak): returns default LSan flag settings, parsed before
// the LSAN_OPTIONS environment variable.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_options();

// User-overridable (weak): a nonzero return disables the end-of-process
// leak check.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off();

// User-overridable (weak): returns additional suppression rules, in the
// same format as a suppressions file.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions();

// Registers [p, p + size) as a root region: its contents are treated as
// live pointers during leak scans (see RootRegion above).
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *p, __lsan::uptr size);

// Removes a region previously registered with __lsan_register_root_region.
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *p, __lsan::uptr size);

}  // extern "C"
291 
292 #endif  // LSAN_COMMON_H
293