//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif

namespace __sanitizer {

struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1GiB

static const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
INLINE void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
INLINE int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}

#if SANITIZER_ANDROID
INLINE uptr GetPageSize() {
  // Android post-M sysconf(_SC_PAGESIZE) crashes if called from
  // .preinit_array.
  return 4096;
}
INLINE uptr GetPageSizeCached() {
  return 4096;
}
#else
uptr GetPageSize();
extern uptr PageSizeCached;
INLINE uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
#endif
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();
// Threads
tid_t GetTid();
int TgKill(pid_t pid, tid_t tid, int sig);
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
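// Example (illustrative sketch, not part of this interface): pair MmapOrDie
// with UnmapOrDie for a multi-page scratch buffer. The size and the
// "example scratch" tag are hypothetical.
//   uptr size = 4 * GetPageSizeCached();
//   void *buf = MmapOrDie(size, "example scratch");  // dies on failure
//   // ... use buf ...
//   UnmapOrDie(buf, size);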
// Behaves just like MmapOrDie, but tolerates an out-of-memory condition; in
// that case it returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
    WARN_UNUSED_RESULT;
bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
                             const char *name = nullptr) WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
// Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition;
// in that case it returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
                                 const char *name = nullptr);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map an aligned chunk of address space; size and alignment are powers of two.
// Dies on all but out-of-memory errors; in the latter case returns nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate
// inaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);

void MprotectMallocZones(void *addr, int prot);

// Find an available address range of the given size and alignment.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found, uptr *max_occupied_addr);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end] address range. A no-op
// if the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check whether the VMA size the runtime was built for matches the actual one.
void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);

class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr InitAligned(uptr size, uptr align, const char *name = nullptr);
  uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
  uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void *base_;
  uptr size_;
  const char *name_;
  uptr os_handle_;
};
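// Example (illustrative sketch): reserve a range up front and commit pages on
// demand. The 1 MiB size and the "example range" tag are hypothetical.
//   ReservedAddressRange range;
//   uptr base = range.Init(1 << 20, "example range");
//   range.MapOrDie(base, GetPageSizeCached());  // commit the first page
//   // ... use the committed page ...
//   range.Unmap(base, GetPageSizeCached());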
typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/ uptr *stats, uptr stats_size);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array containing
// |stats_size| elements.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);

// Simple low-level (mmap-based) allocator for internal use. It doesn't have a
// constructor, so all instances of LowLevelAllocator should be
// linker-initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);

 private:
  char *allocated_end_;
  char *allocated_current_;
};
// Set the min alignment of LowLevelAllocator to at least the given alignment.
void SetLowLevelAllocateMinAlignment(uptr alignment);
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Registers a tool-specific callback for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);

// IO
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
  } while (0)
#define VPrintf(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
  } while (0)
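// Example (illustrative sketch): verbosity-gated logging. The messages are
// hypothetical; %zd is the specifier commonly used for uptr-sized values in
// this runtime's Printf.
//   VReport(1, "%s: initialization done\n", SanitizerToolName);
//   VPrintf(2, "page size: %zd\n", GetPageSizeCached());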
// Locks sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock();
  ~ScopedErrorReportLock();

  static void CheckLocked();
};

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

bool IsAccessibleMemoryRange(uptr beg, uptr size);

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/ char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
void PrintModuleMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);

u32 GetUid();
void ReExec();
void CheckASLR();
void CheckMPROTECT();
char **GetArgv();
char **GetEnviron();
void PrintCmdline();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

// Other
void SleepForSeconds(int seconds);
void SleepForMillis(int millis);
u64 NanoTime();
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN CheckFailed(const char *file, int line, const char *cond, u64 v1,
                          u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);

// Specific tools may override the behavior of the Die and CheckFailed
// functions to do tool-specific work.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that will be run when Die is called.
// The callbacks run in reverse order of registration. Tools are strongly
// encouraged to set up all callbacks during initialization, while there is
// only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
                                        u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);
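// Example (illustrative sketch): run a tool-specific hook when Die() fires.
// FlushExampleLogs is a hypothetical helper, not part of this interface.
//   static void FlushExampleLogs() { /* flush buffered output */ }
//   // During single-threaded initialization:
//   AddDieCallback(FlushExampleLogs);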
// The callback is called when soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true), or when RSS goes back down below the limit
// (exceeded==false).
// The callback should be registered once, at tool initialization time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);

// Signal reporting.
// Each sanitizer uses a slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print a deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// We don't want the summary to be too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but constructs error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains the AddressInfo by symbolizing the top frame of
// the stack trace.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);

void ReportMmapWriteExec(int prot);

// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
#endif
}
#endif

INLINE uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}

INLINE uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}

INLINE bool IsPowerOfTwo(uptr x) {
  return (x & (x - 1)) == 0;
}

INLINE uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

INLINE uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

INLINE uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

INLINE bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

INLINE uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}
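// Example (illustrative sketch): what the helpers above compute. The boundary
// must be a power of two.
//   RoundUpTo(100, 64)    == 128
//   RoundDownTo(100, 64)  == 64
//   IsAligned(128, 64)    == true
//   Log2(64)              == 6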
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template<class T> T Min(T a, T b) { return a < b ? a : b; }
template<class T> T Max(T a, T b) { return a > b ? a : b; }
template<class T> void Swap(T& a, T& b) {
  T tmp = a;
  a = b;
  b = tmp;
}

// Char handling
INLINE bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
INLINE bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
INLINE int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}

// A low-level vector based on mmap. May incur a significant memory overhead
// for small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
class InternalMmapVectorNoCtor {
 public:
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = 0;
    reserve(initial_capacity);
  }
  void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    CHECK_LE(size_, capacity());
    if (size_ == capacity()) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize the internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};

template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}

template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}
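// Example (illustrative sketch): because it has no constructor, the NoCtor
// variant suits linker-initialized globals. The global's name is
// hypothetical.
//   static InternalMmapVectorNoCtor<uptr> g_example_addrs;
//   // Once, before first use:
//   g_example_addrs.Initialize(/*initial_capacity=*/128);
//   g_example_addrs.push_back(addr);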
template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};

class InternalScopedString : public InternalMmapVector<char> {
 public:
  explicit InternalScopedString(uptr max_length)
      : InternalMmapVector<char>(max_length), length_(0) {
    (*this)[0] = '\0';
  }
  uptr length() { return length_; }
  void clear() {
    (*this)[0] = '\0';
    length_ = 0;
  }
  void append(const char *format, ...);

 private:
  uptr length_;
};

template <class T>
struct CompareLess {
  bool operator()(const T &a, const T &b) const { return a < b; }
};

// HeapSort for arrays and InternalMmapVector.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap the largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}

// Works like std::lower_bound: finds the first element that is not less
// than val.
template <class Container, class Value, class Compare>
uptr InternalLowerBound(const Container &v, uptr first, uptr last,
                        const Value &val, Compare comp) {
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}
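// Example (illustrative sketch): sort a vector, then binary-search it.
// `target` is a hypothetical value to look up.
//   InternalMmapVector<uptr> v;
//   // ... fill v ...
//   Sort(v.data(), v.size());
//   uptr idx = InternalLowerBound(v, 0, v.size(), target, CompareLess<uptr>());
//   bool found = idx < v.size() && v[idx] == target;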
enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64
};

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if the file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = 1 << 26, error_t *errno_p = nullptr);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector, as it may reread
// the file multiple times to avoid mmap during read attempts. It's used to
// read procmaps, where short reads with mmap in between can produce
// inconsistent results.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if the file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = 1 << 26,
                      error_t *errno_p = nullptr);

// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}

const uptr kModuleUUIDSize = 16;
const uptr kMaxSegName = 16;

// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_executable_address_(0),
        arch_(kModuleArchUnknown),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_executable_address() const { return max_executable_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_executable_address_;
  ModuleArch arch_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};
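// Example (illustrative sketch): query a module found by the OS-specific
// code. `mod` and `pc` are hypothetical.
//   const LoadedModule &mod = ...;
//   if (mod.containsAddress(pc))
//     Printf("%p belongs to %s (%s)\n", (void *)pc, mod.full_name(),
//            ModuleArchToString(mod.arch()));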
// List of LoadedModules. The OS-dependent implementation is responsible for
// filling in this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears.
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};
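// Example (illustrative sketch): enumerate the modules loaded into the
// current process.
//   ListOfModules modules;
//   modules.init();
//   for (const LoadedModule &mod : modules)
//     VPrintf(2, "module %s @ %p\n", mod.full_name(),
//             (void *)mod.base_address());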
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
#define SANITIZER_WIN_TRACE 1
#else
#define SANITIZER_WIN_TRACE 0
#endif

#if SANITIZER_MAC || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
INLINE void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
INLINE void WriteOneLineToSyslog(const char *s) {}
INLINE void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
INLINE void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
INLINE void SetAbortMessage(const char *) {}
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
INLINE void AndroidLogWrite(const char *buffer_unused) {}
INLINE void SanitizerInitializeUnwinder() {}
INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif

INLINE uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
  // Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void *(*func)(void *), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}

struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;

  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will be zero then. This field makes it possible to distinguish such cases
  // from dereferences of null.
  bool is_true_faulting_addr;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor.
  SignalContext() = default;

  // Creates a signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()),
        is_true_faulting_addr(IsTrueFaultingAddress()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal, e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if the signal is a stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform-specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
  bool IsTrueFaultingAddress() const;
};

void InitializePlatformEarly();
void MaybeReexec();

template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
//   auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

// Linux on 64-bit s390 had a nasty bug that crashed the whole machine
// if a process used virtual memory over 4TB (as many sanitizers like
// to do). This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
INLINE void AvoidCVE_2016_2143() {}
#endif

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value for the allocator_release_to_os_interval_ms common flag,
// indicating that the sanitizer allocator should not attempt to release
// memory to the OS.
const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking, like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);
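// Example (illustrative sketch): seed a PRNG, falling back to the clock if
// the OS cannot provide entropy without blocking.
//   u32 seed;
//   if (!GetRandom(&seed, sizeof(seed), /*blocking=*/false))
//     seed = (u32)NanoTime();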
// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
INLINE u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}

template <typename T>
class ArrayRef {
 public:
  ArrayRef() {}
  ArrayRef(T *begin, T *end) : begin_(begin), end_(end) {}

  T *begin() { return begin_; }
  T *end() { return end_; }

 private:
  T *begin_ = nullptr;
  T *end_ = nullptr;
};

}  // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {  // NOLINT
  return alloc.Allocate(size);
}

#endif  // SANITIZER_COMMON_H