//===-- sanitizer_win.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements Windows-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS

#define WIN32_LEAN_AND_MEAN
#define NOGDI
#include <windows.h>
#include <io.h>
#include <psapi.h>
#include <stdlib.h>

#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_win_defs.h"

#if defined(PSAPI_VERSION) && PSAPI_VERSION == 1
#pragma comment(lib, "psapi")
#endif
#if SANITIZER_WIN_TRACE
#include <traceloggingprovider.h>
// Windows trace logging provider init
#pragma comment(lib, "advapi32.lib")
TRACELOGGING_DECLARE_PROVIDER(g_asan_provider);
// GUID must be the same in utils/AddressSanitizerLoggingProvider.wprp
TRACELOGGING_DEFINE_PROVIDER(g_asan_provider, "AddressSanitizerLoggingProvider",
                             (0x6c6c766d, 0x3846, 0x4e6a, 0xa4, 0xfb, 0x5b,
                              0x53, 0x0b, 0xd0, 0xf3, 0xfa));
#else
#define TraceLoggingUnregister(x)
#endif

// A macro to tell the compiler that this part of the code cannot be reached,
// if the compiler supports this feature. Since we're using this in
// code that is called when terminating the process, the expansion of the
// macro should not terminate the process to avoid infinite recursion.
#if defined(__clang__)
# define BUILTIN_UNREACHABLE() __builtin_unreachable()
#elif defined(__GNUC__) && \
    (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
# define BUILTIN_UNREACHABLE() __builtin_unreachable()
#elif defined(_MSC_VER)
# define BUILTIN_UNREACHABLE() __assume(0)
#else
# define BUILTIN_UNREACHABLE()
#endif

namespace __sanitizer {

#include "sanitizer_syscall_generic.inc"

// --------------------- sanitizer_common.h
uptr GetPageSize() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwPageSize;
}

uptr GetMmapGranularity() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwAllocationGranularity;
}

uptr GetMaxUserVirtualAddress() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return (uptr)si.lpMaximumApplicationAddress;
}

uptr GetMaxVirtualAddress() {
  return GetMaxUserVirtualAddress();
}

bool FileExists(const char *filename) {
  return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES;
}

uptr internal_getpid() {
  return GetProcessId(GetCurrentProcess());
}

int internal_dlinfo(void *handle, int request, void *p) {
  UNIMPLEMENTED();
}

// In contrast to POSIX, on Windows GetCurrentThreadId()
// returns a system-unique identifier.
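// (Note that thread IDs are only unique among currently live threads; Windows
// may reuse an ID after the thread it belonged to exits.)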
tid_t GetTid() {
  return GetCurrentThreadId();
}

uptr GetThreadSelf() {
  return GetTid();
}

#if !SANITIZER_GO
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  CHECK(stack_top);
  CHECK(stack_bottom);
  MEMORY_BASIC_INFORMATION mbi;
  CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0);
  // FIXME: is it possible for the stack to not be a single allocation?
  // Are these values what ASan expects to get (reserved, not committed;
  // including stack guard page) ?
  *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
  *stack_bottom = (uptr)mbi.AllocationBase;
}
#endif  // #if !SANITIZER_GO

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (rv == 0)
    ReportMmapFailureAndDie(size, mem_type, "allocate",
                            GetLastError(), raw_report);
  return rv;
}

void UnmapOrDie(void *addr, uptr size) {
  if (!size || !addr)
    return;

  MEMORY_BASIC_INFORMATION mbi;
  CHECK(VirtualQuery(addr, &mbi, sizeof(mbi)));

  // MEM_RELEASE can only be used to unmap whole regions previously mapped with
  // VirtualAlloc. So we first try MEM_RELEASE since it is better, and if that
  // fails try MEM_DECOMMIT.
  if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
    if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
      Report("ERROR: %s failed to "
             "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n",
             SanitizerToolName, size, size, addr, GetLastError());
      CHECK("unable to unmap" && 0);
    }
  }
}

static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type,
                                     const char *mmap_type) {
  error_t last_error = GetLastError();
  if (last_error == ERROR_NOT_ENOUGH_MEMORY)
    return nullptr;
  ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (rv == 0)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
  return rv;
}

// We want to map a chunk of address space aligned to 'alignment'.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  // Windows will align our allocations to at least 64K.
  alignment = Max(alignment, GetMmapGranularity());

  uptr mapped_addr =
      (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (!mapped_addr)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

  // If we got it right on the first try, return. Otherwise, unmap it and go to
  // the slow path.
  if (IsAligned(mapped_addr, alignment))
    return (void*)mapped_addr;
  if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
    ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());

  // If we didn't get an aligned address, overallocate, find an aligned address,
  // unmap, and try to allocate at that aligned address.
  int retries = 0;
  const int kMaxRetries = 10;
  for (; retries < kMaxRetries &&
         (mapped_addr == 0 || !IsAligned(mapped_addr, alignment));
       retries++) {
    // Overallocate size + alignment bytes.
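    // Reserving the larger range guarantees it contains an address aligned to
    // 'alignment'. We then release that reservation and re-reserve exactly
    // 'size' bytes at the aligned address; another thread can grab the range
    // in between, which is why this runs inside a retry loop.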
    mapped_addr =
        (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
    if (!mapped_addr)
      return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

    // Find the aligned address.
    uptr aligned_addr = RoundUpTo(mapped_addr, alignment);

    // Free the overallocation.
    if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
      ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());

    // Attempt to allocate exactly the number of bytes we need at the aligned
    // address. This may fail for a number of reasons, in which case we continue
    // the loop.
    mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size,
                                     MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  }

  // Fail if we can't make this work quickly.
  if (retries == kMaxRetries && mapped_addr == 0)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

  return (void *)mapped_addr;
}

bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
  // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
  // but on Win64 it does.
  (void)name;  // unsupported
#if !SANITIZER_GO && SANITIZER_WINDOWS64
  // On ASan/Windows64, using MEM_COMMIT would result in error
  // 1455: ERROR_COMMITMENT_LIMIT.
  // ASan uses an exception handler to commit pages on demand.
  void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE);
#else
  void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT,
                         PAGE_READWRITE);
#endif
  if (p == 0) {
    Report("ERROR: %s failed to "
           "allocate %p (%zd) bytes at %p (error code: %d)\n",
           SanitizerToolName, size, size, fixed_addr, GetLastError());
    return false;
  }
  return true;
}

bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size, const char *name) {
  // FIXME: Windows supports large pages too. Might be worth checking.
  return MmapFixedNoReserve(fixed_addr, size, name);
}

// Memory space mapped by 'MmapFixedOrDie' must have been reserved by
// 'MmapFixedNoAccess'.
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name) {
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
                         MEM_COMMIT, PAGE_READWRITE);
  if (p == 0) {
    char mem_type[30];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                      fixed_addr);
    ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError());
  }
  return p;
}

// Uses fixed_addr for now.
// Will use offset instead once we've implemented this function for real.
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size, const char *name) {
  return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size));
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size,
                                    const char *name) {
  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  // Only unmap if it covers the entire range.
  CHECK((addr == reinterpret_cast<uptr>(base_)) && (size == size_));
  // We unmap the whole range, just null out the base.
  base_ = nullptr;
  size_ = 0;
  UnmapOrDie(reinterpret_cast<void*>(addr), size);
}

void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size, const char *name) {
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
                         MEM_COMMIT, PAGE_READWRITE);
  if (p == 0) {
    char mem_type[30];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                      fixed_addr);
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
  }
  return p;
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  // FIXME: make this really NoReserve?
  return MmapOrDie(size, mem_type);
}

uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
  base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size) : MmapNoAccess(size);
  size_ = size;
  name_ = name;
  (void)os_handle_;  // unsupported
  return reinterpret_cast<uptr>(base_);
}


void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  (void)name;  // unsupported
  void *res = VirtualAlloc((LPVOID)fixed_addr, size,
                           MEM_RESERVE, PAGE_NOACCESS);
  if (res == 0)
    Report("WARNING: %s failed to "
           "mprotect %p (%zd) bytes at %p (error code: %d)\n",
           SanitizerToolName, size, size, fixed_addr, GetLastError());
  return res;
}

void *MmapNoAccess(uptr size) {
  void *res = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS);
  if (res == 0)
    Report("WARNING: %s failed to "
           "mprotect %p (%zd) bytes (error code: %d)\n",
           SanitizerToolName, size, size, GetLastError());
  return res;
}

bool MprotectNoAccess(uptr addr, uptr size) {
  DWORD old_protection;
  return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
}

void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
  // This is almost useless on 32-bits.
  // FIXME: add madvise-analog when we move to 64-bits.
}

void SetShadowRegionHugePageMode(uptr addr, uptr size) {
  // FIXME: probably similar to ReleaseMemoryToOS.
}

bool DontDumpShadowMemory(uptr addr, uptr length) {
  // This is almost useless on 32-bits.
  // FIXME: add madvise-analog when we move to 64-bits.
  return true;
}

uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment,
                      UNUSED uptr &high_mem_end) {
  const uptr granularity = GetMmapGranularity();
  const uptr alignment =
      Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
  const uptr left_padding =
      Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
  uptr space_size = shadow_size_bytes + left_padding;
  uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
                                               granularity, nullptr, nullptr);
  CHECK_NE((uptr)0, shadow_start);
  CHECK(IsAligned(shadow_start, alignment));
  return shadow_start;
}

uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found,
                              uptr *max_occupied_addr) {
  uptr address = 0;
  while (true) {
    MEMORY_BASIC_INFORMATION info;
    if (!::VirtualQuery((void*)address, &info, sizeof(info)))
      return 0;

    if (info.State == MEM_FREE) {
      uptr shadow_address = RoundUpTo((uptr)info.BaseAddress + left_padding,
                                      alignment);
      if (shadow_address + size < (uptr)info.BaseAddress + info.RegionSize)
        return shadow_address;
    }

    // Move to the next region.
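    // VirtualQuery reports one contiguous region per call, so stepping by
    // RegionSize walks the whole address space; the query fails (and we return
    // 0 above) once we step past the highest user-mode address. Unlike the
    // POSIX implementation, |largest_gap_found| and |max_occupied_addr| are
    // not computed here.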
    address = (uptr)info.BaseAddress + info.RegionSize;
  }
  return 0;
}

bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
  MEMORY_BASIC_INFORMATION mbi;
  CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
  return mbi.Protect == PAGE_NOACCESS &&
         (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end;
}

void *MapFileToMemory(const char *file_name, uptr *buff_size) {
  UNIMPLEMENTED();
}

void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
  UNIMPLEMENTED();
}

static const int kMaxEnvNameLength = 128;
static const DWORD kMaxEnvValueLength = 32767;

namespace {

struct EnvVariable {
  char name[kMaxEnvNameLength];
  char value[kMaxEnvValueLength];
};

}  // namespace

static const int kEnvVariables = 5;
static EnvVariable env_vars[kEnvVariables];
static int num_env_vars;

const char *GetEnv(const char *name) {
  // Note: this implementation caches the values of the environment variables
  // and limits their quantity.
  for (int i = 0; i < num_env_vars; i++) {
    if (0 == internal_strcmp(name, env_vars[i].name))
      return env_vars[i].value;
  }
  CHECK_LT(num_env_vars, kEnvVariables);
  DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value,
                                     kMaxEnvValueLength);
  if (rv > 0 && rv < kMaxEnvValueLength) {
    CHECK_LT(internal_strlen(name), kMaxEnvNameLength);
    internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength);
    num_env_vars++;
    return env_vars[num_env_vars - 1].value;
  }
  return 0;
}

const char *GetPwd() {
  UNIMPLEMENTED();
}

u32 GetUid() {
  UNIMPLEMENTED();
}

namespace {
struct ModuleInfo {
  const char *filepath;
  uptr base_address;
  uptr end_address;
};

#if !SANITIZER_GO
int CompareModulesBase(const void *pl, const void *pr) {
  const ModuleInfo *l = (const ModuleInfo *)pl, *r = (const ModuleInfo *)pr;
  if (l->base_address < r->base_address)
    return -1;
  return l->base_address > r->base_address;
}
#endif
}  // namespace

#if !SANITIZER_GO
void DumpProcessMap() {
  Report("Dumping process modules:\n");
  ListOfModules modules;
  modules.init();
  uptr num_modules = modules.size();

  InternalMmapVector<ModuleInfo> module_infos(num_modules);
  for (size_t i = 0; i < num_modules; ++i) {
    module_infos[i].filepath = modules[i].full_name();
    module_infos[i].base_address = modules[i].ranges().front()->beg;
    module_infos[i].end_address = modules[i].ranges().back()->end;
  }
  qsort(module_infos.data(), num_modules, sizeof(ModuleInfo),
        CompareModulesBase);

  for (size_t i = 0; i < num_modules; ++i) {
    const ModuleInfo &mi = module_infos[i];
    if (mi.end_address != 0) {
      Printf("\t%p-%p %s\n", mi.base_address, mi.end_address,
             mi.filepath[0] ? mi.filepath : "[no name]");
    } else if (mi.filepath[0]) {
      Printf("\t??\?-??? %s\n", mi.filepath);
    } else {
      Printf("\t???\n");
    }
  }
}
#endif

void DisableCoreDumperIfNecessary() {
  // Do nothing.
}

void ReExec() {
  UNIMPLEMENTED();
}

void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}

bool StackSizeIsUnlimited() {
  UNIMPLEMENTED();
}

void SetStackSizeLimitInBytes(uptr limit) {
  UNIMPLEMENTED();
}

bool AddressSpaceIsUnlimited() {
  UNIMPLEMENTED();
}

void SetAddressSpaceUnlimited() {
  UNIMPLEMENTED();
}

bool IsPathSeparator(const char c) {
  return c == '\\' || c == '/';
}

static bool IsAlpha(char c) {
  c = ToLower(c);
  return c >= 'a' && c <= 'z';
}

bool IsAbsolutePath(const char *path) {
  return path != nullptr && IsAlpha(path[0]) && path[1] == ':' &&
         IsPathSeparator(path[2]);
}

void SleepForSeconds(int seconds) {
  Sleep(seconds * 1000);
}

void SleepForMillis(int millis) {
  Sleep(millis);
}

u64 NanoTime() {
  static LARGE_INTEGER frequency = {};
  LARGE_INTEGER counter;
  if (UNLIKELY(frequency.QuadPart == 0)) {
    QueryPerformanceFrequency(&frequency);
    CHECK_NE(frequency.QuadPart, 0);
  }
  QueryPerformanceCounter(&counter);
  counter.QuadPart *= 1000ULL * 1000000ULL;
  counter.QuadPart /= frequency.QuadPart;
  return counter.QuadPart;
}

u64 MonotonicNanoTime() { return NanoTime(); }

void Abort() {
  internal__exit(3);
}

#if !SANITIZER_GO
// Read the file to extract the ImageBase field from the PE header. If ASLR is
// disabled and this virtual address is available, the loader will typically
// load the image at this address. Therefore, we call it the preferred base. Any
// addresses in the DWARF typically assume that the object has been loaded at
// this address.
static uptr GetPreferredBase(const char *modname) {
  fd_t fd = OpenFile(modname, RdOnly, nullptr);
  if (fd == kInvalidFd)
    return 0;
  FileCloser closer(fd);

  // Read just the DOS header.
  IMAGE_DOS_HEADER dos_header;
  uptr bytes_read;
  if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) ||
      bytes_read != sizeof(dos_header))
    return 0;

  // The file should start with the right signature.
  if (dos_header.e_magic != IMAGE_DOS_SIGNATURE)
    return 0;

  // The layout at e_lfanew is:
  // "PE\0\0"
  // IMAGE_FILE_HEADER
  // IMAGE_OPTIONAL_HEADER
  // Seek to e_lfanew and read all that data.
  char buf[4 + sizeof(IMAGE_FILE_HEADER) + sizeof(IMAGE_OPTIONAL_HEADER)];
  if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) ==
      INVALID_SET_FILE_POINTER)
    return 0;
  if (!ReadFromFile(fd, &buf[0], sizeof(buf), &bytes_read) ||
      bytes_read != sizeof(buf))
    return 0;

  // Check for "PE\0\0" before the PE header.
  char *pe_sig = &buf[0];
  if (internal_memcmp(pe_sig, "PE\0\0", 4) != 0)
    return 0;

  // Skip over IMAGE_FILE_HEADER. We could do more validation here if we wanted.
  IMAGE_OPTIONAL_HEADER *pe_header =
      (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER));

  // Check for more magic in the PE header.
  if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC)
    return 0;

  // Finally, return the ImageBase.
  return (uptr)pe_header->ImageBase;
}

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wframe-larger-than="
#endif
void ListOfModules::init() {
  clearOrInit();
  HANDLE cur_process = GetCurrentProcess();

  // Query the list of modules. Start by assuming there are no more than 256
  // modules and retry if that's not sufficient.
  HMODULE *hmodules = 0;
  uptr modules_buffer_size = sizeof(HMODULE) * 256;
  DWORD bytes_required;
  while (!hmodules) {
    hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__);
    CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size,
                             &bytes_required));
    if (bytes_required > modules_buffer_size) {
      // Either there turned out to be more than 256 hmodules, or new hmodules
      // could have loaded since the last try. Retry.
      UnmapOrDie(hmodules, modules_buffer_size);
      hmodules = 0;
      modules_buffer_size = bytes_required;
    }
  }

  // |num_modules| is the number of modules actually present.
  size_t num_modules = bytes_required / sizeof(HMODULE);
  for (size_t i = 0; i < num_modules; ++i) {
    HMODULE handle = hmodules[i];
    MODULEINFO mi;
    if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi)))
      continue;

    // Get the UTF-16 path and convert to UTF-8.
    wchar_t modname_utf16[kMaxPathLength];
    int modname_utf16_len =
        GetModuleFileNameW(handle, modname_utf16, kMaxPathLength);
    if (modname_utf16_len == 0)
      modname_utf16[0] = '\0';
    char module_name[kMaxPathLength];
    int module_name_len =
        ::WideCharToMultiByte(CP_UTF8, 0, modname_utf16, modname_utf16_len + 1,
                              &module_name[0], kMaxPathLength, NULL, NULL);
    module_name[module_name_len] = '\0';

    uptr base_address = (uptr)mi.lpBaseOfDll;
    uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage;

    // Adjust the base address of the module so that we get a VA instead of an
    // RVA when computing the module offset. This helps llvm-symbolizer find the
    // right DWARF CU. In the common case that the image is loaded at its
    // preferred address, we will now print normal virtual addresses.
    uptr preferred_base = GetPreferredBase(&module_name[0]);
    uptr adjusted_base = base_address - preferred_base;

    LoadedModule cur_module;
    cur_module.set(module_name, adjusted_base);
    // We add the whole module as one single address range.
    cur_module.addAddressRange(base_address, end_address, /*executable*/ true,
                               /*writable*/ true);
    modules_.push_back(cur_module);
  }
  UnmapOrDie(hmodules, modules_buffer_size);
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif

void ListOfModules::fallbackInit() { clear(); }

// We can't use atexit() directly at __asan_init time as the CRT is not fully
// initialized at this point. Place the functions into a vector and use
// atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers).
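// This is what the .CRT$XID section below accomplishes: the CRT runs function
// pointers placed in .CRT$XID after the C initializers in .CRT$XIC, so by the
// time RunAtexit executes, atexit() is safe to call.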
InternalMmapVectorNoCtor<void (*)(void)> atexit_functions;

int Atexit(void (*function)(void)) {
  atexit_functions.push_back(function);
  return 0;
}

static int RunAtexit() {
  TraceLoggingUnregister(g_asan_provider);
  int ret = 0;
  for (uptr i = 0; i < atexit_functions.size(); ++i) {
    ret |= atexit(atexit_functions[i]);
  }
  return ret;
}

#pragma section(".CRT$XID", long, read)
__declspec(allocate(".CRT$XID")) int (*__run_atexit)() = RunAtexit;
#endif

// ------------------ sanitizer_libc.h
fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) {
  // FIXME: Use the wide variants to handle Unicode filenames.
  fd_t res;
  if (mode == RdOnly) {
    res = CreateFileA(filename, GENERIC_READ,
                      FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                      nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
  } else if (mode == WrOnly) {
    res = CreateFileA(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
                      FILE_ATTRIBUTE_NORMAL, nullptr);
  } else {
    UNIMPLEMENTED();
  }
  CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd);
  CHECK(res != kStderrFd || kStderrFd == kInvalidFd);
  if (res == kInvalidFd && last_error)
    *last_error = GetLastError();
  return res;
}

void CloseFile(fd_t fd) {
  CloseHandle(fd);
}

bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
                  error_t *error_p) {
  CHECK(fd != kInvalidFd);

  // bytes_read can't be passed directly to ReadFile:
  // uptr is unsigned long long on 64-bit Windows.
  unsigned long num_read_long;

  bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr);
  if (!success && error_p)
    *error_p = GetLastError();
  if (bytes_read)
    *bytes_read = num_read_long;
  return success;
}

bool SupportsColoredOutput(fd_t fd) {
  // FIXME: support colored output.
  return false;
}

bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
                 error_t *error_p) {
  CHECK(fd != kInvalidFd);

  // Handle null optional parameters.
  error_t dummy_error;
  error_p = error_p ? error_p : &dummy_error;
  uptr dummy_bytes_written;
  bytes_written = bytes_written ? bytes_written : &dummy_bytes_written;

  // Initialize output parameters in case we fail.
  *error_p = 0;
  *bytes_written = 0;

  // Map the conventional Unix fds 1 and 2 to Windows handles. They might be
  // closed, in which case this will fail.
  if (fd == kStdoutFd || fd == kStderrFd) {
    fd = GetStdHandle(fd == kStdoutFd ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);
    if (fd == 0) {
      *error_p = ERROR_INVALID_HANDLE;
      return false;
    }
  }

  DWORD bytes_written_32;
  if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) {
    *error_p = GetLastError();
    return false;
  } else {
    *bytes_written = bytes_written_32;
    return true;
  }
}

uptr internal_sched_yield() {
  Sleep(0);
  return 0;
}

void internal__exit(int exitcode) {
  TraceLoggingUnregister(g_asan_provider);
  // ExitProcess runs some finalizers, so use TerminateProcess to avoid that.
  // The debugger doesn't stop on TerminateProcess like it does on ExitProcess,
  // so add our own breakpoint here.
  if (::IsDebuggerPresent())
    __debugbreak();
  TerminateProcess(GetCurrentProcess(), exitcode);
  BUILTIN_UNREACHABLE();
}

uptr internal_ftruncate(fd_t fd, uptr size) {
  UNIMPLEMENTED();
}

uptr GetRSS() {
  PROCESS_MEMORY_COUNTERS counters;
  if (!GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters)))
    return 0;
  return counters.WorkingSetSize;
}

void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
void internal_join_thread(void *th) { }

// ---------------------- BlockingMutex ---------------- {{{1

BlockingMutex::BlockingMutex() {
  CHECK(sizeof(SRWLOCK) <= sizeof(opaque_storage_));
  internal_memset(this, 0, sizeof(*this));
}

void BlockingMutex::Lock() {
  AcquireSRWLockExclusive((PSRWLOCK)opaque_storage_);
  CHECK_EQ(owner_, 0);
  owner_ = GetThreadSelf();
}

void BlockingMutex::Unlock() {
  CheckLocked();
  owner_ = 0;
  ReleaseSRWLockExclusive((PSRWLOCK)opaque_storage_);
}

void BlockingMutex::CheckLocked() {
  CHECK_EQ(owner_, GetThreadSelf());
}

uptr GetTlsSize() {
  return 0;
}

void InitTlsSize() {
}

void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size) {
#if SANITIZER_GO
  *stk_addr = 0;
  *stk_size = 0;
  *tls_addr = 0;
  *tls_size = 0;
#else
  uptr stack_top, stack_bottom;
  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
  *stk_addr = stack_bottom;
  *stk_size = stack_top - stack_bottom;
  *tls_addr = 0;
  *tls_size = 0;
#endif
}

void ReportFile::Write(const char *buffer, uptr length) {
  SpinMutexLock l(mu);
  ReopenIfNecessary();
  if (!WriteToFile(fd, buffer, length)) {
    // stderr may be closed, but we may be able to print to the debugger
    // instead. This is the case when launching a program from Visual Studio,
    // and the following routine should write to its console.
    OutputDebugStringA(buffer);
  }
}

void SetAlternateSignalStack() {
  // FIXME: Decide what to do on Windows.
}

void UnsetAlternateSignalStack() {
  // FIXME: Decide what to do on Windows.
}

void InstallDeadlySignalHandlers(SignalHandlerType handler) {
  (void)handler;
  // FIXME: Decide what to do on Windows.
}

HandleSignalMode GetHandleSignalMode(int signum) {
  // FIXME: Decide what to do on Windows.
  return kHandleSignalNo;
}

// Check based on flags if we should handle this exception.
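// Access violations and related memory faults map to handle_segv, illegal and
// privileged instructions and breakpoints map to handle_sigill, and floating
// point / integer arithmetic faults map to handle_sigfpe.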
bool IsHandledDeadlyException(DWORD exceptionCode) {
  switch (exceptionCode) {
    case EXCEPTION_ACCESS_VIOLATION:
    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
    case EXCEPTION_STACK_OVERFLOW:
    case EXCEPTION_DATATYPE_MISALIGNMENT:
    case EXCEPTION_IN_PAGE_ERROR:
      return common_flags()->handle_segv;
    case EXCEPTION_ILLEGAL_INSTRUCTION:
    case EXCEPTION_PRIV_INSTRUCTION:
    case EXCEPTION_BREAKPOINT:
      return common_flags()->handle_sigill;
    case EXCEPTION_FLT_DENORMAL_OPERAND:
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
    case EXCEPTION_FLT_INEXACT_RESULT:
    case EXCEPTION_FLT_INVALID_OPERATION:
    case EXCEPTION_FLT_OVERFLOW:
    case EXCEPTION_FLT_STACK_CHECK:
    case EXCEPTION_FLT_UNDERFLOW:
    case EXCEPTION_INT_DIVIDE_BY_ZERO:
    case EXCEPTION_INT_OVERFLOW:
      return common_flags()->handle_sigfpe;
  }
  return false;
}

bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  SYSTEM_INFO si;
  GetNativeSystemInfo(&si);
  uptr page_size = si.dwPageSize;
  uptr page_mask = ~(page_size - 1);

  for (uptr page = beg & page_mask, end = (beg + size - 1) & page_mask;
       page <= end;) {
    MEMORY_BASIC_INFORMATION info;
    if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info))
      return false;

    if (info.Protect == 0 || info.Protect == PAGE_NOACCESS ||
        info.Protect == PAGE_EXECUTE)
      return false;

    if (info.RegionSize == 0)
      return false;

    page += info.RegionSize;
  }

  return true;
}

bool SignalContext::IsStackOverflow() const {
  return (DWORD)GetType() == EXCEPTION_STACK_OVERFLOW;
}

void SignalContext::InitPcSpBp() {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
  CONTEXT *context_record = (CONTEXT *)context;

  pc = (uptr)exception_record->ExceptionAddress;
#ifdef _WIN64
  bp = (uptr)context_record->Rbp;
  sp = (uptr)context_record->Rsp;
#else
  bp = (uptr)context_record->Ebp;
  sp = (uptr)context_record->Esp;
#endif
}

uptr SignalContext::GetAddress() const {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
  if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
    return exception_record->ExceptionInformation[1];
  return (uptr)exception_record->ExceptionAddress;
}

bool SignalContext::IsMemoryAccess() const {
  return ((EXCEPTION_RECORD *)siginfo)->ExceptionCode ==
         EXCEPTION_ACCESS_VIOLATION;
}

bool SignalContext::IsTrueFaultingAddress() const { return true; }

SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;

  // The write flag is only available for access violation exceptions.
  if (exception_record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
    return SignalContext::UNKNOWN;

  // The contents of this array are documented at
  // https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
  // The first element indicates read as 0, write as 1, or execute as 8. The
  // second element is the faulting address.
  switch (exception_record->ExceptionInformation[0]) {
    case 0:
      return SignalContext::READ;
    case 1:
      return SignalContext::WRITE;
    case 8:
      return SignalContext::UNKNOWN;
  }
  return SignalContext::UNKNOWN;
}

void SignalContext::DumpAllRegisters(void *context) {
  // FIXME: Implement this.
}

int SignalContext::GetType() const {
  return static_cast<const EXCEPTION_RECORD *>(siginfo)->ExceptionCode;
}

const char *SignalContext::Describe() const {
  unsigned code = GetType();
  // Get the string description of the exception if this is a known deadly
  // exception.
  switch (code) {
    case EXCEPTION_ACCESS_VIOLATION:
      return "access-violation";
    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
      return "array-bounds-exceeded";
    case EXCEPTION_STACK_OVERFLOW:
      return "stack-overflow";
    case EXCEPTION_DATATYPE_MISALIGNMENT:
      return "datatype-misalignment";
    case EXCEPTION_IN_PAGE_ERROR:
      return "in-page-error";
    case EXCEPTION_ILLEGAL_INSTRUCTION:
      return "illegal-instruction";
    case EXCEPTION_PRIV_INSTRUCTION:
      return "priv-instruction";
    case EXCEPTION_BREAKPOINT:
      return "breakpoint";
    case EXCEPTION_FLT_DENORMAL_OPERAND:
      return "flt-denormal-operand";
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
      return "flt-divide-by-zero";
    case EXCEPTION_FLT_INEXACT_RESULT:
      return "flt-inexact-result";
    case EXCEPTION_FLT_INVALID_OPERATION:
      return "flt-invalid-operation";
    case EXCEPTION_FLT_OVERFLOW:
      return "flt-overflow";
    case EXCEPTION_FLT_STACK_CHECK:
      return "flt-stack-check";
    case EXCEPTION_FLT_UNDERFLOW:
      return "flt-underflow";
    case EXCEPTION_INT_DIVIDE_BY_ZERO:
      return "int-divide-by-zero";
    case EXCEPTION_INT_OVERFLOW:
      return "int-overflow";
  }
  return "unknown exception";
}

uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
  // FIXME: Actually implement this function.
  CHECK_GT(buf_len, 0);
  buf[0] = 0;
  return 0;
}

uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

void CheckVMASize() {
  // Do nothing.
}

void InitializePlatformEarly() {
  // Do nothing.
}

void MaybeReexec() {
  // No need to re-exec on Windows.
}

void CheckASLR() {
  // Do nothing.
}

void CheckMPROTECT() {
  // Do nothing.
}

char **GetArgv() {
  // FIXME: Actually implement this function.
  return 0;
}

char **GetEnviron() {
  // FIXME: Actually implement this function.
  return 0;
}

pid_t StartSubprocess(const char *program, const char *const argv[],
                      const char *const envp[], fd_t stdin_fd, fd_t stdout_fd,
                      fd_t stderr_fd) {
  // FIXME: implement on this platform.
  // Should be implemented based on
  // SymbolizerProcess::StartSymbolizerSubprocess
  // from lib/sanitizer_common/sanitizer_symbolizer_win.cpp.
  return -1;
}

bool IsProcessRunning(pid_t pid) {
  // FIXME: implement on this platform.
  return false;
}

int WaitForProcess(pid_t pid) { return -1; }

// FIXME: implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }

void CheckNoDeepBind(const char *filename, int flag) {
  // Do nothing.
}

// FIXME: implement on this platform.
bool GetRandom(void *buffer, uptr length, bool blocking) {
  UNIMPLEMENTED();
}

u32 GetNumberOfCPUs() {
  SYSTEM_INFO sysinfo = {};
  GetNativeSystemInfo(&sysinfo);
  return sysinfo.dwNumberOfProcessors;
}

#if SANITIZER_WIN_TRACE
// TODO(mcgov): Rename this project-wide to PlatformLogInit
void AndroidLogInit(void) {
  HRESULT hr = TraceLoggingRegister(g_asan_provider);
  if (!SUCCEEDED(hr))
    return;
}

void SetAbortMessage(const char *) {}

void LogFullErrorReport(const char *buffer) {
  if (common_flags()->log_to_syslog) {
    InternalMmapVector<wchar_t> filename;
    DWORD filename_length = 0;
    do {
      filename.resize(filename.size() + 0x100);
      filename_length =
          GetModuleFileNameW(NULL, filename.begin(), filename.size());
    } while (filename_length >= filename.size());
    TraceLoggingWrite(g_asan_provider, "AsanReportEvent",
                      TraceLoggingValue(filename.begin(), "ExecutableName"),
                      TraceLoggingValue(buffer, "AsanReportContents"));
  }
}
#endif  // SANITIZER_WIN_TRACE

void InitializePlatformCommonFlags(CommonFlags *cf) {}

}  // namespace __sanitizer

#endif  // SANITIZER_WINDOWS