//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif

namespace __sanitizer {

struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1GiB

static const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
INLINE void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
INLINE int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}

#if SANITIZER_ANDROID
INLINE uptr GetPageSize() {
  // Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
  return 4096;
}
INLINE uptr GetPageSizeCached() {
  return 4096;
}
#else
uptr GetPageSize();
extern uptr PageSizeCached;
INLINE uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
#endif
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();
// Threads
tid_t GetTid();
int TgKill(pid_t pid, tid_t tid, int sig);
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
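// Illustrative usage (a minimal sketch; the size and the "example buffer" tag
// are hypothetical, and error handling is done by the *OrDie functions):
//   void *buf = MmapOrDie(GetPageSizeCached(), "example buffer");
//   // ... use buf ...
//   UnmapOrDie(buf, GetPageSizeCached());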
// Behaves just like MmapOrDie, but tolerates an out-of-memory condition; in
// that case it returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
    WARN_UNUSED_RESULT;
bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
                             const char *name = nullptr) WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
// Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition;
// in that case it returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
                                 const char *name = nullptr);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
// Dies on all but out-of-memory errors; in the latter case returns nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate an
// inaccessible memory range.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);

void MprotectMallocZones(void *addr, int prot);

// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found, uptr *max_occupied_addr);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end] address range. No-op if
// the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);

class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
  uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void *base_;
  uptr size_;
  const char *name_;
  uptr os_handle_;
};

typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/ uptr *stats, uptr stats_size);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array containing
// |stats_size| elements.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);

// Simple low-level (mmap-based) allocator for internal use. Doesn't have a
// constructor, so all instances of LowLevelAllocator should be
// linker initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);

 private:
  char *allocated_end_;
  char *allocated_current_;
};
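// Illustrative usage (a minimal sketch; MyType and the external lock are
// hypothetical; the placement operator new used here is defined at the end of
// this header):
//   static LowLevelAllocator allocator;  // linker initialized
//   // ... with the external lock held:
//   MyType *obj = new (allocator) MyType();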
// Set the minimum alignment of LowLevelAllocator to at least |alignment|.
void SetLowLevelAllocateMinAlignment(uptr alignment);
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows registering tool-specific callbacks for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);

// IO
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
  } while (0)
#define VPrintf(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
  } while (0)
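// Illustrative usage (a minimal sketch; the message and |shadow| are
// hypothetical):
//   VReport(1, "%s: initialized shadow at %p\n", SanitizerToolName, shadow);
// The message goes through Report() only when the current verbosity level
// (see SetVerbosity above) is at least 1.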
// Locks sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock();
  ~ScopedErrorReportLock();

  static void CheckLocked();
};

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

bool IsAccessibleMemoryRange(uptr beg, uptr size);

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/ char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
void PrintModuleMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);

u32 GetUid();
void ReExec();
void CheckASLR();
void CheckMPROTECT();
char **GetArgv();
char **GetEnviron();
void PrintCmdline();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

// Other
void SleepForSeconds(int seconds);
void SleepForMillis(int millis);
u64 NanoTime();
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);

// Specific tools may override the behavior of the "Die" and "CheckFailed"
// functions to do tool-specific work.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that would be run when "Die" is
// called. The callbacks will be run in the opposite order. Tools are strongly
// recommended to set up all callbacks during initialization, while there is
// only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
                                        u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);

// The callback will be called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true), or if RSS has dropped back below the limit
// (exceeded==false).
// The callback should be registered once at tool initialization time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);

// Signal reporting.
// Each sanitizer uses a slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// We don't want the summary to be too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but construct error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);
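// Illustrative usage (a minimal sketch; the error type and |stack| are
// hypothetical):
//   ReportErrorSummary("heap-buffer-overflow", &stack);
// Per the format described above, this would emit a line along the lines of:
//   SUMMARY: AddressSanitizer: heap-buffer-overflow file.cpp:12 in main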
void ReportMmapWriteExec(int prot);

// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
#endif
}
#endif

INLINE uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}

INLINE uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}

INLINE bool IsPowerOfTwo(uptr x) {
  return (x & (x - 1)) == 0;
}

INLINE uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

INLINE uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

INLINE uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

INLINE bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

INLINE uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}
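// Worked examples (the values follow directly from the definitions above;
// boundaries must be powers of two):
//   RoundUpTo(17, 16)              == 32
//   RoundDownTo(17, 16)            == 16
//   RoundUpToPowerOfTwo(17)        == 32
//   MostSignificantSetBitIndex(12) == 3   // 12 == 0b1100
//   Log2(16)                       == 4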
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template<class T> T Min(T a, T b) { return a < b ? a : b; }
template<class T> T Max(T a, T b) { return a > b ? a : b; }
template<class T> void Swap(T& a, T& b) {
  T tmp = a;
  a = b;
  b = tmp;
}

// Char handling
INLINE bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
INLINE bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
INLINE int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}

// A low-level vector based on mmap. May incur a significant memory overhead
// for small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
class InternalMmapVectorNoCtor {
 public:
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = 0;
    reserve(initial_capacity);
  }
  void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    CHECK_LE(size_, capacity());
    if (size_ == capacity()) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};

template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}

template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}

template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};
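// Illustrative usage (a minimal sketch; the element values are hypothetical,
// and T must be a POD type per the warning above):
//   InternalMmapVector<uptr> addrs;
//   addrs.push_back(0x1000);
//   addrs.push_back(0x2000);
//   for (uptr addr : addrs) { /* ... */ }  // begin()/end() enable range-for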
class InternalScopedString : public InternalMmapVector<char> {
 public:
  explicit InternalScopedString(uptr max_length)
      : InternalMmapVector<char>(max_length), length_(0) {
    (*this)[0] = '\0';
  }
  uptr length() { return length_; }
  void clear() {
    (*this)[0] = '\0';
    length_ = 0;
  }
  void append(const char *format, ...);

 private:
  uptr length_;
};

template <class T>
struct CompareLess {
  bool operator()(const T &a, const T &b) const { return a < b; }
};

// HeapSort for arrays and InternalMmapVector.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}

// Works like std::lower_bound: finds the first element that is not less
// than val.
template <class Container, class Value, class Compare>
uptr InternalLowerBound(const Container &v, uptr first, uptr last,
                        const Value &val, Compare comp) {
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}
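// Illustrative usage (a minimal sketch; the values are hypothetical):
//   uptr a[] = {3, 1, 2};
//   Sort(a, 3);  // a is now {1, 2, 3}
//   uptr idx = InternalLowerBound(a, 0, 3, (uptr)2, CompareLess<uptr>());
//   // idx == 1, the position of the first element not less than 2.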
enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64
};

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = 1 << 26, error_t *errno_p = nullptr);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector as it may reread
// the file multiple times to avoid mmap during read attempts. It's used to
// read procmaps, where short reads with mmap in between can produce
// inconsistent results.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = 1 << 26,
                      error_t *errno_p = nullptr);

// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}

const uptr kModuleUUIDSize = 16;
const uptr kMaxSegName = 16;

// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_executable_address_(0),
        arch_(kModuleArchUnknown),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_executable_address() const { return max_executable_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_executable_address_;
  ModuleArch arch_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};
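// Illustrative usage (a minimal sketch; |module| and |pc| are hypothetical):
//   if (module.containsAddress(pc))
//     Printf("%s+0x%zx\n", module.full_name(), pc - module.base_address());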
// List of LoadedModules. OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears.
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};

// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
#define SANITIZER_WIN_TRACE 1
#else
#define SANITIZER_WIN_TRACE 0
#endif

#if SANITIZER_MAC || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
INLINE void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
INLINE void WriteOneLineToSyslog(const char *s) {}
INLINE void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
INLINE void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
INLINE void SetAbortMessage(const char *) {}
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
INLINE void AndroidLogWrite(const char *buffer_unused) {}
INLINE void SanitizerInitializeUnwinder() {}
INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif

INLINE uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
  // Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void (*func)(void *), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();
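// Illustrative usage (a minimal sketch; Worker is a hypothetical function):
//   static void Worker(void *arg) { /* ... */ }
//   void *th = internal_start_thread(Worker, nullptr);
//   internal_join_thread(th);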
// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}

struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;

  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will be zero then. This field allows us to distinguish between these cases
  // and dereferences of null.
  bool is_true_faulting_addr;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor.
  SignalContext() = default;

  // Creates signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()),
        is_true_faulting_addr(IsTrueFaultingAddress()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal, e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if signal is stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
  bool IsTrueFaultingAddress() const;
};

void InitializePlatformEarly();
void MaybeReexec();

template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
//   auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

// Linux on 64-bit s390 had a nasty bug that could crash the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do). This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
INLINE void AvoidCVE_2016_2143() {}
#endif

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value for the allocator_release_to_os_interval_ms common flag,
// indicating that the sanitizer allocator should not attempt to release
// memory to the OS.
const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);
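// Illustrative usage (a minimal sketch; the seed buffer is hypothetical):
//   u8 seed[16];
//   if (!GetRandom(seed, sizeof(seed), /*blocking=*/false)) {
//     // Fall back to a weaker seed, e.g. NanoTime().
//   }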
// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
INLINE u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}

}  // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {  // NOLINT
  return alloc.Allocate(size);
}

#endif  // SANITIZER_COMMON_H