//=-- lsan_common.h -------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Private LSan header.
//
//===----------------------------------------------------------------------===//

#ifndef LSAN_COMMON_H
#define LSAN_COMMON_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

// LeakSanitizer relies on some glibc internals (e.g. the TLS machinery) on
// Linux. Also, LSan doesn't like 32-bit architectures, because their "small"
// (4-byte) pointers lead to a high false negative ratio on large leaks. But
// we still want to have it for some 32-bit arches (e.g. x86), see
// https://github.com/google/sanitizers/issues/403.
// To enable LeakSanitizer on a new architecture, one needs to implement the
// internal_clone function as well as (probably) adjust the TLS machinery for
// the new architecture inside the sanitizer library.
#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) &&          \
    (SANITIZER_WORDSIZE == 64) &&                                        \
    (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
     defined(__powerpc64__) || defined(__s390x__))
#define CAN_SANITIZE_LEAKS 1
#elif defined(__i386__) && \
    (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)
#define CAN_SANITIZE_LEAKS 1
#elif defined(__arm__) && \
    SANITIZER_LINUX && !SANITIZER_ANDROID
#define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_NETBSD || SANITIZER_FUCHSIA
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
#endif
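
// Illustrative usage sketch (not part of this header; details live in the
// .cpp files): code that depends on leak checking is expected to compile only
// when CAN_SANITIZE_LEAKS is 1, with empty stubs provided otherwise. The
// pattern looks roughly like:
//
//   #if CAN_SANITIZE_LEAKS
//   void DoLeakCheck() { /* walk the heap, report leaks */ }
//   #else
//   void DoLeakCheck() {}  // leak checking unsupported on this target
//   #endif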

namespace __sanitizer {
class FlagParser;
class ThreadRegistry;
struct DTLS;
}

namespace __lsan {

// Chunk tags.
enum ChunkTag {
  kDirectlyLeaked = 0,  // default
  kIndirectlyLeaked = 1,
  kReachable = 2,
  kIgnored = 3
};

const u32 kInvalidTid = (u32) -1;

struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
#undef LSAN_FLAG

  void SetDefaults();
  uptr pointer_alignment() const {
    return use_unaligned ? 1 : sizeof(uptr);
  }
};
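
// Note on the X-macro above: each LSAN_FLAG(Type, Name, DefaultValue,
// Description) entry in lsan_flags.inc expands to a plain "Type Name;" field
// of struct Flags. For example (default and description shown here are
// illustrative, not quoted from lsan_flags.inc), an entry such as
//   LSAN_FLAG(bool, use_unaligned, false, "Consider unaligned pointers.")
// adds a "bool use_unaligned;" member, which pointer_alignment() reads.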

extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
void RegisterLsanFlags(FlagParser *parser, Flags *f);

struct Leak {
  u32 id;
  uptr hit_count;
  uptr total_size;
  u32 stack_trace_id;
  bool is_directly_leaked;
  bool is_suppressed;
};

struct LeakedObject {
  u32 leak_id;
  uptr addr;
  uptr size;
};

// Aggregates leaks by stack trace prefix.
class LeakReport {
 public:
  LeakReport() {}
  void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
                      ChunkTag tag);
  void ReportTopLeaks(uptr max_leaks);
  void PrintSummary();
  void ApplySuppressions();
  uptr UnsuppressedLeakCount();

 private:
  void PrintReportForLeak(uptr index);
  void PrintLeakedObjectsForLeak(uptr index);

  u32 next_id_ = 0;
  InternalMmapVector<Leak> leaks_;
  InternalMmapVector<LeakedObject> leaked_objects_;
};
typedef InternalMmapVector<uptr> Frontier;

// Platform-specific functions.
void InitializePlatformSpecificModules();
void ProcessGlobalRegions(Frontier *frontier);
void ProcessPlatformSpecificAllocations(Frontier *frontier);

struct RootRegion {
  uptr begin;
  uptr size;
};

// LockStuffAndStopTheWorld can start to use Scan* calls to collect into
// this Frontier vector before the StopTheWorldCallback actually runs.
// This is used when the OS has a unified callback API for suspending
// threads and enumerating roots.
struct CheckForLeaksParam {
  Frontier frontier;
  LeakReport leak_report;
  bool success = false;
};

InternalMmapVector<RootRegion> const *GetRootRegions();
void ScanRootRegion(Frontier *frontier, RootRegion const &region,
                    uptr region_begin, uptr region_end, bool is_readable);
void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg);
// Run stoptheworld while holding any platform-specific locks, as well as the
// allocator and thread registry locks.
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
                              CheckForLeaksParam *argument);
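
// Illustrative sketch of how these pieces fit together (the callback name and
// its body are assumptions for illustration, not declared in this header):
//
//   static void CheckForLeaksCallback(const SuspendedThreadsList &threads,
//                                     void *arg) {
//     CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
//     // Scan roots into param->frontier, classify chunks, fill
//     // param->leak_report, then set param->success = true.
//   }
//   ...
//   CheckForLeaksParam param;
//   LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);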

void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag);
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);

enum IgnoreObjectResult {
  kIgnoreObjectSuccess,
  kIgnoreObjectAlreadyIgnored,
  kIgnoreObjectInvalid
};

// Functions called from the parent tool.
const char *MaybeCallLsanDefaultOptions();
void InitCommonLsan();
void DoLeakCheck();
void DoRecoverableLeakCheckVoid();
void DisableCounterUnderflow();
bool DisabledInThisThread();

// Used to implement __lsan::ScopedDisabler.
void DisableInThisThread();
void EnableInThisThread();
// Can be used to ignore memory allocated by an intercepted
// function.
struct ScopedInterceptorDisabler {
  ScopedInterceptorDisabler() { DisableInThisThread(); }
  ~ScopedInterceptorDisabler() { EnableInThisThread(); }
};
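
// Illustrative usage sketch (the intercepted function is hypothetical): an
// interceptor whose internal allocations should never be reported as leaks
// can guard the real call with the RAII helper above.
//
//   INTERCEPTOR(void *, some_function, uptr size) {
//     ScopedInterceptorDisabler disabler;  // allocations below are ignored
//     return REAL(some_function)(size);
//   }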

// According to the Itanium C++ ABI, the array cookie is a single word
// containing the size of the allocated array.
static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
                                           uptr addr) {
  return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
         *reinterpret_cast<uptr *>(chunk_beg) == 0;
}

// According to the ARM C++ ABI, the array cookie consists of two words:
// struct array_cookie {
//   std::size_t element_size; // element_size != 0
//   std::size_t element_count;
// };
static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
                                       uptr addr) {
  return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
         *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
}

// Special case for "new T[0]" where T is a type with a destructor.
// new T[0] will allocate a cookie (one or two words) for the array size (0)
// and return a pointer to the end of the allocated chunk. The actual cookie
// layout varies between platforms according to their C++ ABI implementation.
inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
                                        uptr addr) {
#if defined(__arm__)
  return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
#else
  return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
#endif
}
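
// Worked example of the case handled above (a sketch; the exact heap layout
// is allocator-dependent): on an Itanium-ABI target, "new T[0]" for a T with
// a destructor allocates just one word, writes 0 (the element count) into it,
// and returns a pointer one word past the start, i.e. exactly
// chunk_beg + chunk_size. That past-the-end pointer is typically the only
// live reference to the chunk, so a caller such as PointsIntoChunk() can use
// this check to still treat the pointer as a reference into the chunk instead
// of reporting the cookie-only chunk as unreachable.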

// The following must be implemented in the parent tool.

void ForEachChunk(ForEachChunkCallback callback, void *arg);
// Returns the address range occupied by the global allocator object.
void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
void UnlockAllocator();
// Returns true if [addr, addr + sizeof(void *)) is poisoned.
bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
void LockThreadRegistry();
void UnlockThreadRegistry();
ThreadRegistry *GetThreadRegistryLocked();
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls);
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches);
void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                            void *arg);
// If called from the main thread, updates the main thread's TID in the thread
// registry. We need this to handle processes that fork() without a subsequent
// exec(), which invalidates the recorded TID. To update it, we must call
// gettid() from the main thread. Our solution is to call this function before
// leak checking and also before every call to pthread_create() (to handle cases
// where leak checking is initiated from a non-main thread).
void EnsureMainThreadIDIsCorrect();
// If p points into a chunk that has been allocated to the user, returns its
// user-visible address. Otherwise, returns 0.
uptr PointsIntoChunk(void *p);
// Returns address of user-visible chunk contained in this allocator chunk.
uptr GetUserBegin(uptr chunk);
// Helper for __lsan_ignore_object().
IgnoreObjectResult IgnoreObjectLocked(const void *p);
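
// Illustrative usage of the public counterpart, declared in
// <sanitizer/lsan_interface.h> rather than in this header: user code can
// exclude a deliberately leaked object from reports with
//
//   void *p = malloc(16);     // intentionally never freed
//   __lsan_ignore_object(p);  // marks the chunk as kIgnored
//
// which the LSan runtime routes to IgnoreObjectLocked() above.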

// Return the linker module, if valid for the platform.
LoadedModule *GetLinker();

// Return true if LSan has finished leak checking and reported leaks.
bool HasReportedLeaks();

// Run platform-specific leak handlers.
void HandleLeaks();

// Wrapper for chunk metadata operations.
class LsanMetadata {
 public:
  // Constructor accepts address of user-visible chunk.
  explicit LsanMetadata(uptr chunk);
  bool allocated() const;
  ChunkTag tag() const;
  void set_tag(ChunkTag value);
  uptr requested_size() const;
  u32 stack_trace_id() const;
 private:
  void *metadata_;
};
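
// Illustrative sketch of how the parent tool's chunk interface and
// LsanMetadata are typically combined (the callback name and its body are
// only an example, not the runtime's actual callbacks):
//
//   static void ExampleTagChunksCb(uptr chunk, void *arg) {
//     chunk = GetUserBegin(chunk);
//     LsanMetadata m(chunk);
//     if (m.allocated() && m.tag() != kReachable)
//       m.set_tag(kIndirectlyLeaked);
//   }
//   ...
//   ForEachChunk(ExampleTagChunksCb, nullptr);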

}  // namespace __lsan

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_options();

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off();

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions();
}  // extern "C"
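
// Illustrative note: the weak functions above are meant to be overridden by
// the user program; the flag string below is just an example value.
//
//   extern "C" const char *__lsan_default_options() {
//     return "use_unaligned=1";
//   }
//
//   extern "C" int __lsan_is_turned_off() {
//     return 0;  // returning non-zero disables the end-of-process leak check
//   }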

#endif  // LSAN_COMMON_H