xref: /freebsd/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h (revision a03411e84728e9b267056fd31c7d1d9d1dc1b01e)
1 //===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is shared between run-time libraries of sanitizers.
10 //
11 // It declares common functions and classes that are used in both runtimes.
12 // Implementations of some functions are provided in sanitizer_common, while
13 // others must be defined by the run-time library itself.
14 //===----------------------------------------------------------------------===//
15 #ifndef SANITIZER_COMMON_H
16 #define SANITIZER_COMMON_H
17 
18 #include "sanitizer_flags.h"
19 #include "sanitizer_internal_defs.h"
20 #include "sanitizer_libc.h"
21 #include "sanitizer_list.h"
22 #include "sanitizer_mutex.h"
23 
24 #if defined(_MSC_VER) && !defined(__clang__)
25 extern "C" void _ReadWriteBarrier();
26 #pragma intrinsic(_ReadWriteBarrier)
27 #endif
28 
29 namespace __sanitizer {
30 
31 struct AddressInfo;
32 struct BufferedStackTrace;
33 struct SignalContext;
34 struct StackTrace;
35 
36 // Constants.
37 const uptr kWordSize = SANITIZER_WORDSIZE / 8;
38 const uptr kWordSizeInBits = 8 * kWordSize;
39 
40 const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;
41 
42 const uptr kMaxPathLength = 4096;
43 
44 const uptr kMaxThreadStackSize = 1 << 30;  // 1 GiB
45 
46 const uptr kErrorMessageBufferSize = 1 << 16;
47 
48 // Denotes fake PC values that come from JIT/Java/etc.
49 // For such PC values __tsan_symbolize_external_ex() will be called.
50 const u64 kExternalPCBit = 1ULL << 60;
51 
52 extern const char *SanitizerToolName;  // Can be changed by the tool.
53 
54 extern atomic_uint32_t current_verbosity;
55 inline void SetVerbosity(int verbosity) {
56   atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
57 }
58 inline int Verbosity() {
59   return atomic_load(&current_verbosity, memory_order_relaxed);
60 }
61 
62 #if SANITIZER_ANDROID
63 inline uptr GetPageSize() {
64   // Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
65   return 4096;
66 }
67 inline uptr GetPageSizeCached() {
68   return 4096;
69 }
70 #else
71 uptr GetPageSize();
72 extern uptr PageSizeCached;
73 inline uptr GetPageSizeCached() {
74   if (!PageSizeCached)
75     PageSizeCached = GetPageSize();
76   return PageSizeCached;
77 }
78 #endif
79 uptr GetMmapGranularity();
80 uptr GetMaxVirtualAddress();
81 uptr GetMaxUserVirtualAddress();
82 // Threads
83 tid_t GetTid();
84 int TgKill(pid_t pid, tid_t tid, int sig);
85 uptr GetThreadSelf();
86 void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
87                                 uptr *stack_bottom);
88 void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
89                           uptr *tls_addr, uptr *tls_size);
90 
91 // Memory management
92 void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
93 inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
94   return MmapOrDie(size, mem_type, /*raw_report*/ true);
95 }
96 void UnmapOrDie(void *addr, uptr size);
97 // Behaves just like MmapOrDie, but tolerates an out-of-memory condition, in
98 // which case it returns nullptr.
99 void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
100 bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
101      WARN_UNUSED_RESULT;
102 bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
103                              const char *name = nullptr) WARN_UNUSED_RESULT;
104 void *MmapNoReserveOrDie(uptr size, const char *mem_type);
105 void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
106 // Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition,
107 // in which case it returns nullptr.
108 void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
109                                  const char *name = nullptr);
110 void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
111 void *MmapNoAccess(uptr size);
112 // Maps an aligned chunk of address space; size and alignment are powers of two.
113 // Dies on all errors except out of memory, in which case it returns nullptr.
114 void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
115                                    const char *mem_type);
116 // Disallow access to a memory range.  Use MmapFixedNoAccess to allocate
117 // inaccessible memory.
118 bool MprotectNoAccess(uptr addr, uptr size);
119 bool MprotectReadOnly(uptr addr, uptr size);
120 bool MprotectReadWrite(uptr addr, uptr size);
121 
122 void MprotectMallocZones(void *addr, int prot);
123 
124 #if SANITIZER_WINDOWS
125 // Zero previously mmap'd memory. Currently used only on Windows.
126 bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) WARN_UNUSED_RESULT;
127 #endif
128 
129 #if SANITIZER_LINUX
130 // Unmap memory. Currently only used on Linux.
131 void UnmapFromTo(uptr from, uptr to);
132 #endif
133 
134 // Maps shadow_size_bytes of shadow memory and returns the shadow address. It
135 // will be aligned to the mmap granularity * 2^shadow_scale, or to
136 // 2^min_shadow_base_alignment if that is larger. The returned address will
137 // have max(2^min_shadow_base_alignment, mmap granularity) bytes on the left,
138 // and shadow_size_bytes bytes on the right, which on Linux are mapped with no
139 // access. high_mem_end may be updated if the original shadow size doesn't fit.
140 uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
141                       uptr min_shadow_base_alignment, uptr &high_mem_end);
142 
143 // Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size).
144 // Reserves 2*S bytes of address space to the right of the returned address and
145 // ring_buffer_size bytes to the left.  The returned address is aligned to 2*S.
146 // Also creates num_aliases regions of accessible memory starting at offset S
147 // from the returned address.  Each region has size alias_size and is backed by
148 // the same physical memory.
149 uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
150                                 uptr num_aliases, uptr ring_buffer_size);
151 
152 // Reserves the memory range [beg, end]. If madvise_shadow is true, applies the
153 // madvise settings (e.g. hugepages, core dumping) requested by options.
154 void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
155                               bool madvise_shadow = true);
156 
157 // Protects size bytes of memory starting at addr. Also tries to protect
158 // several pages at the start of the address space, as specified by
159 // zero_base_shadow_start, up to size or zero_base_max_shadow_start at most.
160 void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
161                 uptr zero_base_max_shadow_start);
162 
163 // Find an available address range of the given size and alignment.
164 uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
165                               uptr *largest_gap_found, uptr *max_occupied_addr);
166 
167 // Used to check if we can map shadow memory to a fixed location.
168 bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
169 // Releases memory pages entirely within the [beg, end] address range. A no-op
170 // if the provided range does not contain at least one entire page.
171 void ReleaseMemoryPagesToOS(uptr beg, uptr end);
172 void IncreaseTotalMmap(uptr size);
173 void DecreaseTotalMmap(uptr size);
174 uptr GetRSS();
175 void SetShadowRegionHugePageMode(uptr addr, uptr length);
176 bool DontDumpShadowMemory(uptr addr, uptr length);
177 // Check if the built VMA size matches the runtime one.
178 void CheckVMASize();
179 void RunMallocHooks(void *ptr, uptr size);
180 void RunFreeHooks(void *ptr);
181 
182 class ReservedAddressRange {
183  public:
184   uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
185   uptr InitAligned(uptr size, uptr align, const char *name = nullptr);
186   uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
187   uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
188   void Unmap(uptr addr, uptr size);
189   void *base() const { return base_; }
190   uptr size() const { return size_; }
191 
192  private:
193   void* base_;
194   uptr size_;
195   const char* name_;
196   uptr os_handle_;
197 };
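// Usage sketch (illustrative only; `range` and `kSize` are made-up names):
//   ReservedAddressRange range;
//   uptr base = range.Init(kSize, "my-region");  // reserve, not yet committed
//   range.MapOrDie(base, kSize);                 // map pages within the range
//   range.Unmap(base, kSize);                    // return them to the OS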
198 
199 typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
200                                /*out*/ uptr *stats);
201 
202 // Parse the contents of /proc/self/smaps and generate a memory profile.
203 // |cb| is a tool-specific callback that fills the |stats| array.
204 void GetMemoryProfile(fill_profile_f cb, uptr *stats);
205 void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
206                             uptr smaps_len);
207 
208 // Simple low-level (mmap-based) allocator for internal use. It doesn't have a
209 // constructor, so all instances of LowLevelAllocator must be
210 // linker-initialized.
211 class LowLevelAllocator {
212  public:
213   // Requires an external lock.
214   void *Allocate(uptr size);
215 
216  private:
217   char *allocated_end_;
218   char *allocated_current_;
219 };
220 // Set the min alignment of LowLevelAllocator to at least the given alignment.
221 void SetLowLevelAllocateMinAlignment(uptr alignment);
222 typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
223 // Allows registering a tool-specific callback for LowLevelAllocator.
224 // Passing NULL removes the callback.
225 void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
226 
227 // IO
228 void CatastrophicErrorWrite(const char *buffer, uptr length);
229 void RawWrite(const char *buffer);
230 bool ColorizeReports();
231 void RemoveANSIEscapeSequencesFromString(char *buffer);
232 void Printf(const char *format, ...) FORMAT(1, 2);
233 void Report(const char *format, ...) FORMAT(1, 2);
234 void SetPrintfAndReportCallback(void (*callback)(const char *));
235 #define VReport(level, ...) \
236   do { \
237     if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
238   } while (0)
239 #define VPrintf(level, ...) \
240   do { \
241     if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
242   } while (0)
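// For example (illustrative; the message text is made up):
//   VReport(1, "%s: initialized\n", SanitizerToolName);
// prints via Report() only when the common "verbosity" flag is >= 1.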
243 
244 // Locks sanitizer error reporting and protects against nested errors.
245 class ScopedErrorReportLock {
246  public:
247   ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }
248   ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }
249 
250   static void Lock() SANITIZER_ACQUIRE(mutex_);
251   static void Unlock() SANITIZER_RELEASE(mutex_);
252   static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);
253 
254  private:
255   static atomic_uintptr_t reporting_thread_;
256   static StaticSpinMutex mutex_;
257 };
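// Usage sketch (illustrative; PrintMyReport is a made-up function):
//   {
//     ScopedErrorReportLock lock;  // other reporting threads block in Lock()
//     PrintMyReport();
//   }  // mutex_ is released on scope exit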
258 
259 extern uptr stoptheworld_tracer_pid;
260 extern uptr stoptheworld_tracer_ppid;
261 
262 bool IsAccessibleMemoryRange(uptr beg, uptr size);
263 
264 // Error report formatting.
265 const char *StripPathPrefix(const char *filepath,
266                             const char *strip_file_prefix);
267 // Strip the directories from the module name.
268 const char *StripModuleName(const char *module);
269 
270 // OS
271 uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
272 uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
273 uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len);
274 uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
275 const char *GetProcessName();
276 void UpdateProcessName();
277 void CacheBinaryName();
278 void DisableCoreDumperIfNecessary();
279 void DumpProcessMap();
280 const char *GetEnv(const char *name);
281 bool SetEnv(const char *name, const char *value);
282 
283 u32 GetUid();
284 void ReExec();
285 void CheckASLR();
286 void CheckMPROTECT();
287 char **GetArgv();
288 char **GetEnviron();
289 void PrintCmdline();
290 bool StackSizeIsUnlimited();
291 void SetStackSizeLimitInBytes(uptr limit);
292 bool AddressSpaceIsUnlimited();
293 void SetAddressSpaceUnlimited();
294 void AdjustStackSize(void *attr);
295 void PlatformPrepareForSandboxing(void *args);
296 void SetSandboxingCallback(void (*f)());
297 
298 void InitializeCoverage(bool enabled, const char *coverage_dir);
299 
300 void InitTlsSize();
301 uptr GetTlsSize();
302 
303 // Other
304 void WaitForDebugger(unsigned seconds, const char *label);
305 void SleepForSeconds(unsigned seconds);
306 void SleepForMillis(unsigned millis);
307 u64 NanoTime();
308 u64 MonotonicNanoTime();
309 int Atexit(void (*function)(void));
310 bool TemplateMatch(const char *templ, const char *str);
311 
312 // Exit
313 void NORETURN Abort();
314 void NORETURN Die();
315 void NORETURN
316 CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
317 void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
318                                       const char *mmap_type, error_t err,
319                                       bool raw_report = false);
320 void NORETURN ReportMunmapFailureAndDie(void *ptr, uptr size, error_t err,
321                                         bool raw_report = false);
322 
323 // Returns true if the platform-specific error reported is an OOM error.
324 bool ErrorIsOOM(error_t err);
325 
326 // This reports an error in the form:
327 //
328 //   `ERROR: {{SanitizerToolName}}: out of memory: {{err_msg}}`
329 //
330 // Downstream tools that read sanitizer output will know that errors in this
331 // format are specifically OOM errors.
332 #define ERROR_OOM(err_msg, ...) \
333   Report("ERROR: %s: out of memory: " err_msg, SanitizerToolName, __VA_ARGS__)
334 
335 // Specific tools may override the behavior of the "Die" function to do
336 // tool-specific work.
337 typedef void (*DieCallbackType)(void);
338 
339 // It's possible to add several callbacks that will be run when "Die" is
340 // called. The callbacks are run in reverse order of registration. Tools are
341 // strongly advised to set up all callbacks during initialization, while there
342 // is only a single thread.
343 bool AddDieCallback(DieCallbackType callback);
344 bool RemoveDieCallback(DieCallbackType callback);
345 
346 void SetUserDieCallback(DieCallbackType callback);
347 
348 void SetCheckUnwindCallback(void (*callback)());
349 
350 // Functions related to signal handling.
351 typedef void (*SignalHandlerType)(int, void *, void *);
352 HandleSignalMode GetHandleSignalMode(int signum);
353 void InstallDeadlySignalHandlers(SignalHandlerType handler);
354 
355 // Signal reporting.
356 // Each sanitizer uses a slightly different implementation of stack unwinding.
357 typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
358                                               const void *callback_context,
359                                               BufferedStackTrace *stack);
360 // Print deadly signal report and die.
361 void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
362                         UnwindSignalStackCallbackType unwind,
363                         const void *unwind_context);
364 
365 // Part of HandleDeadlySignal, exposed for asan.
366 void StartReportDeadlySignal();
367 // Part of HandleDeadlySignal, exposed for asan.
368 void ReportDeadlySignal(const SignalContext &sig, u32 tid,
369                         UnwindSignalStackCallbackType unwind,
370                         const void *unwind_context);
371 
372 // Alternative signal stack (POSIX-only).
373 void SetAlternateSignalStack();
374 void UnsetAlternateSignalStack();
375 
376 // Construct a one-line string:
377 //   SUMMARY: SanitizerToolName: error_message
378 // and pass it to __sanitizer_report_error_summary.
379 // If alt_tool_name is provided, it's used in place of SanitizerToolName.
380 void ReportErrorSummary(const char *error_message,
381                         const char *alt_tool_name = nullptr);
382 // Same as above, but construct error_message as:
383 //   error_type file:line[:column][ function]
384 void ReportErrorSummary(const char *error_type, const AddressInfo &info,
385                         const char *alt_tool_name = nullptr);
386 // Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
387 void ReportErrorSummary(const char *error_type, const StackTrace *trace,
388                         const char *alt_tool_name = nullptr);
389 
390 void ReportMmapWriteExec(int prot, int mflags);
391 
392 // Math
393 #if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
394 extern "C" {
395 unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
396 unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
397 #if defined(_WIN64)
398 unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
399 unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
400 #endif
401 }
402 #endif
403 
404 inline uptr MostSignificantSetBitIndex(uptr x) {
405   CHECK_NE(x, 0U);
406   unsigned long up;
407 #if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
408 # ifdef _WIN64
409   up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
410 # else
411   up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
412 # endif
413 #elif defined(_WIN64)
414   _BitScanReverse64(&up, x);
415 #else
416   _BitScanReverse(&up, x);
417 #endif
418   return up;
419 }
420 
421 inline uptr LeastSignificantSetBitIndex(uptr x) {
422   CHECK_NE(x, 0U);
423   unsigned long up;
424 #if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
425 # ifdef _WIN64
426   up = __builtin_ctzll(x);
427 # else
428   up = __builtin_ctzl(x);
429 # endif
430 #elif defined(_WIN64)
431   _BitScanForward64(&up, x);
432 #else
433   _BitScanForward(&up, x);
434 #endif
435   return up;
436 }
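// For example, for x = 40 (binary 101000), MostSignificantSetBitIndex(x) == 5
// and LeastSignificantSetBitIndex(x) == 3.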
437 
438 inline constexpr bool IsPowerOfTwo(uptr x) { return (x & (x - 1)) == 0; }
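// Note that IsPowerOfTwo(0) evaluates to true; callers that must exclude zero
// check it separately (as RoundUpToPowerOfTwo does via CHECK(size)).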
439 
440 inline uptr RoundUpToPowerOfTwo(uptr size) {
441   CHECK(size);
442   if (IsPowerOfTwo(size)) return size;
443 
444   uptr up = MostSignificantSetBitIndex(size);
445   CHECK_LT(size, (1ULL << (up + 1)));
446   CHECK_GT(size, (1ULL << up));
447   return 1ULL << (up + 1);
448 }
449 
450 inline constexpr uptr RoundUpTo(uptr size, uptr boundary) {
451   RAW_CHECK(IsPowerOfTwo(boundary));
452   return (size + boundary - 1) & ~(boundary - 1);
453 }
454 
455 inline constexpr uptr RoundDownTo(uptr x, uptr boundary) {
456   return x & ~(boundary - 1);
457 }
458 
459 inline constexpr bool IsAligned(uptr a, uptr alignment) {
460   return (a & (alignment - 1)) == 0;
461 }
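// Worked example of the masking tricks above, with boundary = 16:
//   RoundUpTo(33, 16)   == (33 + 15) & ~15 == 48
//   RoundDownTo(33, 16) == 33 & ~15        == 32
//   IsAligned(48, 16)   == ((48 & 15) == 0) == true
// The masks are valid only because boundary is a power of two.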
462 
463 inline uptr Log2(uptr x) {
464   CHECK(IsPowerOfTwo(x));
465   return LeastSignificantSetBitIndex(x);
466 }
467 
468 // Don't use std::min, std::max or std::swap, to minimize dependency
469 // on libstdc++.
470 template <class T>
471 constexpr T Min(T a, T b) {
472   return a < b ? a : b;
473 }
474 template <class T>
475 constexpr T Max(T a, T b) {
476   return a > b ? a : b;
477 }
478 template <class T>
479 constexpr T Abs(T a) {
480   return a < 0 ? -a : a;
481 }
482 template<class T> void Swap(T& a, T& b) {
483   T tmp = a;
484   a = b;
485   b = tmp;
486 }
487 
488 // Char handling
489 inline bool IsSpace(int c) {
490   return (c == ' ') || (c == '\n') || (c == '\t') ||
491          (c == '\f') || (c == '\r') || (c == '\v');
492 }
493 inline bool IsDigit(int c) {
494   return (c >= '0') && (c <= '9');
495 }
496 inline int ToLower(int c) {
497   return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
498 }
499 
500 // A low-level vector based on mmap. May incur a significant memory overhead for
501 // small vectors.
502 // WARNING: The current implementation supports only POD types.
503 template<typename T>
504 class InternalMmapVectorNoCtor {
505  public:
506   using value_type = T;
507   void Initialize(uptr initial_capacity) {
508     capacity_bytes_ = 0;
509     size_ = 0;
510     data_ = 0;
511     reserve(initial_capacity);
512   }
513   void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
514   T &operator[](uptr i) {
515     CHECK_LT(i, size_);
516     return data_[i];
517   }
518   const T &operator[](uptr i) const {
519     CHECK_LT(i, size_);
520     return data_[i];
521   }
522   void push_back(const T &element) {
523     if (UNLIKELY(size_ >= capacity())) {
524       CHECK_EQ(size_, capacity());
525       uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
526       Realloc(new_capacity);
527     }
528     internal_memcpy(&data_[size_++], &element, sizeof(T));
529   }
530   T &back() {
531     CHECK_GT(size_, 0);
532     return data_[size_ - 1];
533   }
534   void pop_back() {
535     CHECK_GT(size_, 0);
536     size_--;
537   }
538   uptr size() const {
539     return size_;
540   }
541   const T *data() const {
542     return data_;
543   }
544   T *data() {
545     return data_;
546   }
547   uptr capacity() const { return capacity_bytes_ / sizeof(T); }
548   void reserve(uptr new_size) {
549     // Never downsize the internal buffer.
550     if (new_size > capacity())
551       Realloc(new_size);
552   }
553   void resize(uptr new_size) {
554     if (new_size > size_) {
555       reserve(new_size);
556       internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
557     }
558     size_ = new_size;
559   }
560 
561   void clear() { size_ = 0; }
562   bool empty() const { return size() == 0; }
563 
564   const T *begin() const {
565     return data();
566   }
567   T *begin() {
568     return data();
569   }
570   const T *end() const {
571     return data() + size();
572   }
573   T *end() {
574     return data() + size();
575   }
576 
577   void swap(InternalMmapVectorNoCtor &other) {
578     Swap(data_, other.data_);
579     Swap(capacity_bytes_, other.capacity_bytes_);
580     Swap(size_, other.size_);
581   }
582 
583  private:
584   NOINLINE void Realloc(uptr new_capacity) {
585     CHECK_GT(new_capacity, 0);
586     CHECK_LE(size_, new_capacity);
587     uptr new_capacity_bytes =
588         RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
589     T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
590     internal_memcpy(new_data, data_, size_ * sizeof(T));
591     UnmapOrDie(data_, capacity_bytes_);
592     data_ = new_data;
593     capacity_bytes_ = new_capacity_bytes;
594   }
595 
596   T *data_;
597   uptr capacity_bytes_;
598   uptr size_;
599 };
600 
601 template <typename T>
602 bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
603                 const InternalMmapVectorNoCtor<T> &rhs) {
604   if (lhs.size() != rhs.size()) return false;
605   return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
606 }
607 
608 template <typename T>
609 bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
610                 const InternalMmapVectorNoCtor<T> &rhs) {
611   return !(lhs == rhs);
612 }
613 
614 template<typename T>
615 class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
616  public:
617   InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
618   explicit InternalMmapVector(uptr cnt) {
619     InternalMmapVectorNoCtor<T>::Initialize(cnt);
620     this->resize(cnt);
621   }
622   ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
623   // Disallow copies and moves.
624   InternalMmapVector(const InternalMmapVector &) = delete;
625   InternalMmapVector &operator=(const InternalMmapVector &) = delete;
626   InternalMmapVector(InternalMmapVector &&) = delete;
627   InternalMmapVector &operator=(InternalMmapVector &&) = delete;
628 };
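// Usage sketch (illustrative; `pcs`, `pc1`, and `pc2` are made-up names):
//   InternalMmapVector<uptr> pcs;
//   pcs.push_back(pc1);
//   pcs.push_back(pc2);
//   for (uptr pc : pcs) { /* ... */ }  // begin()/end() support range-for
// Storage comes from MmapOrDie rather than malloc, so it stays independent of
// the tool's allocator.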
629 
630 class InternalScopedString {
631  public:
632   InternalScopedString() : buffer_(1) { buffer_[0] = '\0'; }
633 
634   uptr length() const { return buffer_.size() - 1; }
635   void clear() {
636     buffer_.resize(1);
637     buffer_[0] = '\0';
638   }
639   void append(const char *format, ...) FORMAT(2, 3);
640   const char *data() const { return buffer_.data(); }
641   char *data() { return buffer_.data(); }
642 
643  private:
644   InternalMmapVector<char> buffer_;
645 };
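// Usage sketch (illustrative; `tid` is a made-up variable):
//   InternalScopedString str;
//   str.append("thread %d:", tid);  // printf-style, buffer grows as needed
//   str.append(" done\n");
//   Printf("%s", str.data());       // data() is always NUL-terminated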
646 
647 template <class T>
648 struct CompareLess {
649   bool operator()(const T &a, const T &b) const { return a < b; }
650 };
651 
652 // HeapSort for arrays and InternalMmapVector.
653 template <class T, class Compare = CompareLess<T>>
654 void Sort(T *v, uptr size, Compare comp = {}) {
655   if (size < 2)
656     return;
657   // Stage 1: insert elements into the heap.
658   for (uptr i = 1; i < size; i++) {
659     uptr j, p;
660     for (j = i; j > 0; j = p) {
661       p = (j - 1) / 2;
662       if (comp(v[p], v[j]))
663         Swap(v[j], v[p]);
664       else
665         break;
666     }
667   }
668   // Stage 2: swap the largest element with the last one,
669   // and sink the new top.
670   for (uptr i = size - 1; i > 0; i--) {
671     Swap(v[0], v[i]);
672     uptr j, max_ind;
673     for (j = 0; j < i; j = max_ind) {
674       uptr left = 2 * j + 1;
675       uptr right = 2 * j + 2;
676       max_ind = j;
677       if (left < i && comp(v[max_ind], v[left]))
678         max_ind = left;
679       if (right < i && comp(v[max_ind], v[right]))
680         max_ind = right;
681       if (max_ind != j)
682         Swap(v[j], v[max_ind]);
683       else
684         break;
685     }
686   }
687 }
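// For example (illustrative), sorting in descending order with a custom
// comparator:
//   uptr a[] = {3, 1, 2};
//   Sort(a, ARRAY_SIZE(a), [](uptr x, uptr y) { return x > y; });
//   // a is now {3, 2, 1}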
688 
689 // Works like std::lower_bound: finds the first element that is not less
690 // than val.
691 template <class Container, class T,
692           class Compare = CompareLess<typename Container::value_type>>
693 uptr InternalLowerBound(const Container &v, const T &val, Compare comp = {}) {
694   uptr first = 0;
695   uptr last = v.size();
696   while (last > first) {
697     uptr mid = (first + last) / 2;
698     if (comp(v[mid], val))
699       first = mid + 1;
700     else
701       last = mid;
702   }
703   return first;
704 }
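// For example, for a sorted container v, InternalLowerBound(v, val) returns
// the first index i with !(v[i] < val), or v.size() if all elements are less
// than val.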
705 
706 enum ModuleArch {
707   kModuleArchUnknown,
708   kModuleArchI386,
709   kModuleArchX86_64,
710   kModuleArchX86_64H,
711   kModuleArchARMV6,
712   kModuleArchARMV7,
713   kModuleArchARMV7S,
714   kModuleArchARMV7K,
715   kModuleArchARM64,
716   kModuleArchLoongArch64,
717   kModuleArchRISCV64,
718   kModuleArchHexagon
719 };
720 
721 // Sorts and removes duplicates from the container.
722 template <class Container,
723           class Compare = CompareLess<typename Container::value_type>>
724 void SortAndDedup(Container &v, Compare comp = {}) {
725   Sort(v.data(), v.size(), comp);
726   uptr size = v.size();
727   if (size < 2)
728     return;
729   uptr last = 0;
730   for (uptr i = 1; i < size; ++i) {
731     if (comp(v[last], v[i])) {
732       ++last;
733       if (last != i)
734         v[last] = v[i];
735     } else {
736       CHECK(!comp(v[i], v[last]));
737     }
738   }
739   v.resize(last + 1);
740 }
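// For example, a vector holding {3, 1, 3, 2, 1} is left holding {1, 2, 3}.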
741 
742 constexpr uptr kDefaultFileMaxSize = FIRST_32_SECOND_64(1 << 26, 1 << 28);
743 
744 // Opens the file 'file_name' and reads up to 'max_len' bytes.
745 // The resulting buffer is mmaped and stored in '*buff'.
746 // Returns true if the file was successfully opened and read.
747 bool ReadFileToVector(const char *file_name,
748                       InternalMmapVectorNoCtor<char> *buff,
749                       uptr max_len = kDefaultFileMaxSize,
750                       error_t *errno_p = nullptr);
751 
752 // Opens the file 'file_name' and reads up to 'max_len' bytes.
753 // This function is less I/O efficient than ReadFileToVector, as it may reread
754 // the file multiple times to avoid mmap during read attempts. It's used to read
755 // procmaps, where short reads with mmap in between can produce inconsistent results.
756 // The resulting buffer is mmaped and stored in '*buff'.
757 // The size of the mmaped region is stored in '*buff_size'.
758 // The total number of read bytes is stored in '*read_len'.
759 // Returns true if the file was successfully opened and read.
760 bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
761                       uptr *read_len, uptr max_len = kDefaultFileMaxSize,
762                       error_t *errno_p = nullptr);
763 
764 int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len,
765                             uptr *pc_offset);
766 
767 // When adding a new architecture, don't forget to also update
768 // script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
769 inline const char *ModuleArchToString(ModuleArch arch) {
770   switch (arch) {
771     case kModuleArchUnknown:
772       return "";
773     case kModuleArchI386:
774       return "i386";
775     case kModuleArchX86_64:
776       return "x86_64";
777     case kModuleArchX86_64H:
778       return "x86_64h";
779     case kModuleArchARMV6:
780       return "armv6";
781     case kModuleArchARMV7:
782       return "armv7";
783     case kModuleArchARMV7S:
784       return "armv7s";
785     case kModuleArchARMV7K:
786       return "armv7k";
787     case kModuleArchARM64:
788       return "arm64";
789     case kModuleArchLoongArch64:
790       return "loongarch64";
791     case kModuleArchRISCV64:
792       return "riscv64";
793     case kModuleArchHexagon:
794       return "hexagon";
795   }
796   CHECK(0 && "Invalid module arch");
797   return "";
798 }
799 
800 #if SANITIZER_APPLE
801 const uptr kModuleUUIDSize = 16;
802 #else
803 const uptr kModuleUUIDSize = 32;
804 #endif
805 const uptr kMaxSegName = 16;
806 
807 // Represents a binary loaded into virtual memory (e.g. this can be an
808 // executable or a shared object).
809 class LoadedModule {
810  public:
811   LoadedModule()
812       : full_name_(nullptr),
813         base_address_(0),
814         max_address_(0),
815         arch_(kModuleArchUnknown),
816         uuid_size_(0),
817         instrumented_(false) {
818     internal_memset(uuid_, 0, kModuleUUIDSize);
819     ranges_.clear();
820   }
821   void set(const char *module_name, uptr base_address);
822   void set(const char *module_name, uptr base_address, ModuleArch arch,
823            u8 uuid[kModuleUUIDSize], bool instrumented);
824   void setUuid(const char *uuid, uptr size);
825   void clear();
826   void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
827                        const char *name = nullptr);
828   bool containsAddress(uptr address) const;
829 
830   const char *full_name() const { return full_name_; }
831   uptr base_address() const { return base_address_; }
832   uptr max_address() const { return max_address_; }
833   ModuleArch arch() const { return arch_; }
834   const u8 *uuid() const { return uuid_; }
835   uptr uuid_size() const { return uuid_size_; }
836   bool instrumented() const { return instrumented_; }
837 
838   struct AddressRange {
839     AddressRange *next;
840     uptr beg;
841     uptr end;
842     bool executable;
843     bool writable;
844     char name[kMaxSegName];
845 
846     AddressRange(uptr beg, uptr end, bool executable, bool writable,
847                  const char *name)
848         : next(nullptr),
849           beg(beg),
850           end(end),
851           executable(executable),
852           writable(writable) {
853       internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
854     }
855   };
856 
857   const IntrusiveList<AddressRange> &ranges() const { return ranges_; }
858 
859  private:
860   char *full_name_;  // Owned.
861   uptr base_address_;
862   uptr max_address_;
863   ModuleArch arch_;
864   uptr uuid_size_;
865   u8 uuid_[kModuleUUIDSize];
866   bool instrumented_;
867   IntrusiveList<AddressRange> ranges_;
868 };
869 
870 // List of LoadedModules. The OS-dependent implementation is responsible for
871 // filling in this information.
872 class ListOfModules {
873  public:
874   ListOfModules() : initialized(false) {}
875   ~ListOfModules() { clear(); }
876   void init();
877   void fallbackInit();  // Uses fallback init if available, otherwise clears
878   const LoadedModule *begin() const { return modules_.begin(); }
879   LoadedModule *begin() { return modules_.begin(); }
880   const LoadedModule *end() const { return modules_.end(); }
881   LoadedModule *end() { return modules_.end(); }
882   uptr size() const { return modules_.size(); }
883   const LoadedModule &operator[](uptr i) const {
884     CHECK_LT(i, modules_.size());
885     return modules_[i];
886   }
887 
888  private:
889   void clear() {
890     for (auto &module : modules_) module.clear();
891     modules_.clear();
892   }
893   void clearOrInit() {
894     initialized ? clear() : modules_.Initialize(kInitialCapacity);
895     initialized = true;
896   }
897 
898   InternalMmapVectorNoCtor<LoadedModule> modules_;
899   // We rarely have more than 16K loaded modules.
900   static const uptr kInitialCapacity = 1 << 14;
901   bool initialized;
902 };
903 
904 // Callback type for iterating over a set of memory ranges.
905 typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);
906 
907 enum AndroidApiLevel {
908   ANDROID_NOT_ANDROID = 0,
909   ANDROID_KITKAT = 19,
910   ANDROID_LOLLIPOP_MR1 = 22,
911   ANDROID_POST_LOLLIPOP = 23
912 };
913 
914 void WriteToSyslog(const char *buffer);
915 
916 #if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
917 #define SANITIZER_WIN_TRACE 1
918 #else
919 #define SANITIZER_WIN_TRACE 0
920 #endif
921 
922 #if SANITIZER_APPLE || SANITIZER_WIN_TRACE
923 void LogFullErrorReport(const char *buffer);
924 #else
925 inline void LogFullErrorReport(const char *buffer) {}
926 #endif
927 
928 #if SANITIZER_LINUX || SANITIZER_APPLE
929 void WriteOneLineToSyslog(const char *s);
930 void LogMessageOnPrintf(const char *str);
931 #else
932 inline void WriteOneLineToSyslog(const char *s) {}
933 inline void LogMessageOnPrintf(const char *str) {}
934 #endif
935 
936 #if SANITIZER_LINUX || SANITIZER_WIN_TRACE
937 // Initialize Android logging. Any writes before this are silently lost.
938 void AndroidLogInit();
939 void SetAbortMessage(const char *);
940 #else
941 inline void AndroidLogInit() {}
942 // FIXME: The macOS implementation could use CRSetCrashLogMessage.
943 inline void SetAbortMessage(const char *) {}
944 #endif
945 
946 #if SANITIZER_ANDROID
947 void SanitizerInitializeUnwinder();
948 AndroidApiLevel AndroidGetApiLevel();
949 #else
950 inline void AndroidLogWrite(const char *buffer_unused) {}
951 inline void SanitizerInitializeUnwinder() {}
952 inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
953 #endif
954 
955 inline uptr GetPthreadDestructorIterations() {
956 #if SANITIZER_ANDROID
957   return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
958 #elif SANITIZER_POSIX
959   return 4;
960 #else
961   // Unused on Windows.
962   return 0;
963 #endif
964 }
965 
966 void *internal_start_thread(void *(*func)(void*), void *arg);
967 void internal_join_thread(void *th);
968 void MaybeStartBackgroudThread();
969 
970 // Make the compiler think that something is going on there.
971 // Use this inside a loop that looks like memset/memcpy/etc to prevent the
972 // compiler from recognising it and turning it into an actual call to
973 // memset/memcpy/etc.
974 static inline void SanitizerBreakOptimization(void *arg) {
975 #if defined(_MSC_VER) && !defined(__clang__)
976   _ReadWriteBarrier();
977 #else
978   __asm__ __volatile__("" : : "r" (arg) : "memory");
979 #endif
980 }
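// Usage sketch (illustrative), following the comment above: call it inside a
// hand-written loop so the compiler cannot turn the loop into memset:
//   for (uptr i = 0; i < size; i++) {
//     p[i] = 0;
//     SanitizerBreakOptimization(p);
//   }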
981 
982 struct SignalContext {
983   void *siginfo;
984   void *context;
985   uptr addr;
986   uptr pc;
987   uptr sp;
988   uptr bp;
989   bool is_memory_access;
990   enum WriteFlag { Unknown, Read, Write } write_flag;
991 
992   // In some cases the kernel cannot provide the true faulting address; `addr`
993   // will then be zero. This field makes it possible to distinguish between
994   // those cases and dereferences of null.
995   bool is_true_faulting_addr;
996 
997   // VS2013 doesn't implement unrestricted unions, so we need a trivial default
998   // constructor.
999   SignalContext() = default;
1000 
1001   // Creates a signal context in a platform-specific manner.
1002   // SignalContext keeps pointers to siginfo and context without
1003   // owning them.
1004   SignalContext(void *siginfo, void *context)
1005       : siginfo(siginfo),
1006         context(context),
1007         addr(GetAddress()),
1008         is_memory_access(IsMemoryAccess()),
1009         write_flag(GetWriteFlag()),
1010         is_true_faulting_addr(IsTrueFaultingAddress()) {
1011     InitPcSpBp();
1012   }
1013 
1014   static void DumpAllRegisters(void *context);
1015 
1016   // Type of signal e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
1017   int GetType() const;
1018 
1019   // String description of the signal.
1020   const char *Describe() const;
1021 
1022   // Returns true if signal is stack overflow.
1023   bool IsStackOverflow() const;
1024 
1025  private:
1026   // Platform specific initialization.
1027   void InitPcSpBp();
1028   uptr GetAddress() const;
1029   WriteFlag GetWriteFlag() const;
1030   bool IsMemoryAccess() const;
1031   bool IsTrueFaultingAddress() const;
1032 };
1033 
1034 void InitializePlatformEarly();
1035 
1036 template <typename Fn>
1037 class RunOnDestruction {
1038  public:
1039   explicit RunOnDestruction(Fn fn) : fn_(fn) {}
1040   ~RunOnDestruction() { fn_(); }
1041 
1042  private:
1043   Fn fn_;
1044 };
1045 
1046 // A simple scope guard. Usage:
1047 // auto cleanup = at_scope_exit([]{ do_cleanup; });
1048 template <typename Fn>
1049 RunOnDestruction<Fn> at_scope_exit(Fn fn) {
1050   return RunOnDestruction<Fn>(fn);
1051 }
1052 
1053 // Linux on 64-bit s390 had a nasty bug that crashed the whole machine
1054 // if a process used virtual memory over 4TB (as many sanitizers like
1055 // to do).  This function will abort the process if running on a kernel
1056 // that looks vulnerable.
1057 #if SANITIZER_LINUX && SANITIZER_S390_64
1058 void AvoidCVE_2016_2143();
1059 #else
1060 inline void AvoidCVE_2016_2143() {}
1061 #endif
1062 
1063 struct StackDepotStats {
1064   uptr n_uniq_ids;
1065   uptr allocated;
1066 };
1067 
1068 // The default value for the allocator_release_to_os_interval_ms common flag,
1069 // indicating that the sanitizer allocator should not release memory to the OS.
1070 const s32 kReleaseToOSIntervalNever = -1;
1071 
1072 void CheckNoDeepBind(const char *filename, int flag);
1073 
1074 // Returns the requested amount of random data (up to 256 bytes) that can then
1075 // be used to seed a PRNG. Defaults to blocking like the underlying syscall.
1076 bool GetRandom(void *buffer, uptr length, bool blocking = true);
1077 
1078 // Returns the number of logical processors on the system.
1079 u32 GetNumberOfCPUs();
1080 extern u32 NumberOfCPUsCached;
1081 inline u32 GetNumberOfCPUsCached() {
1082   if (!NumberOfCPUsCached)
1083     NumberOfCPUsCached = GetNumberOfCPUs();
1084   return NumberOfCPUsCached;
1085 }
1086 
1087 }  // namespace __sanitizer
1088 
1089 inline void *operator new(__sanitizer::operator_new_size_type size,
1090                           __sanitizer::LowLevelAllocator &alloc) {
1091   return alloc.Allocate(size);
1092 }
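// Usage sketch (illustrative; `alloc` and `MyNode` are made-up names); the
// caller must hold the external lock that LowLevelAllocator::Allocate expects:
//   static __sanitizer::LowLevelAllocator alloc;  // linker-initialized
//   MyNode *node = new (alloc) MyNode();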
1093 
1094 #endif  // SANITIZER_COMMON_H
1095