//===-- sanitizer_win.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements windows-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS

#define WIN32_LEAN_AND_MEAN
#define NOGDI
#include <direct.h>
#include <windows.h>
#include <io.h>
#include <psapi.h>
#include <stdlib.h>

#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_win_defs.h"

#if defined(PSAPI_VERSION) && PSAPI_VERSION == 1
#pragma comment(lib, "psapi")
#endif
#if SANITIZER_WIN_TRACE
#include <traceloggingprovider.h>
// Windows trace logging provider init
#pragma comment(lib, "advapi32.lib")
TRACELOGGING_DECLARE_PROVIDER(g_asan_provider);
// GUID must be the same in utils/AddressSanitizerLoggingProvider.wprp
TRACELOGGING_DEFINE_PROVIDER(g_asan_provider, "AddressSanitizerLoggingProvider",
                             (0x6c6c766d, 0x3846, 0x4e6a, 0xa4, 0xfb, 0x5b,
                              0x53, 0x0b, 0xd0, 0xf3, 0xfa));
#else
#define TraceLoggingUnregister(x)
#endif

// For WaitOnAddress
#pragma comment(lib, "synchronization.lib")

// A macro to tell the compiler that this part of the code cannot be reached,
// if the compiler supports this feature. Since we're using this in
// code that is called when terminating the process, the expansion of the
// macro should not terminate the process to avoid infinite recursion.
#if defined(__clang__)
# define BUILTIN_UNREACHABLE() __builtin_unreachable()
#elif defined(__GNUC__) && \
    (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
# define BUILTIN_UNREACHABLE() __builtin_unreachable()
#elif defined(_MSC_VER)
# define BUILTIN_UNREACHABLE() __assume(0)
#else
# define BUILTIN_UNREACHABLE()
#endif

namespace __sanitizer {

#include "sanitizer_syscall_generic.inc"

// --------------------- sanitizer_common.h
uptr GetPageSize() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwPageSize;
}

uptr GetMmapGranularity() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwAllocationGranularity;
}

uptr GetMaxUserVirtualAddress() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return (uptr)si.lpMaximumApplicationAddress;
}

uptr GetMaxVirtualAddress() {
  return GetMaxUserVirtualAddress();
}

bool FileExists(const char *filename) {
  return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES;
}

uptr internal_getpid() {
  return GetProcessId(GetCurrentProcess());
}

int internal_dlinfo(void *handle, int request, void *p) {
  UNIMPLEMENTED();
}

// In contrast to POSIX, on Windows GetCurrentThreadId()
// returns a system-unique identifier.
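// Note that thread IDs are only unique among currently running threads; the
// system may reuse an ID after its thread exits.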
tid_t GetTid() {
  return GetCurrentThreadId();
}

uptr GetThreadSelf() {
  return GetTid();
}

#if !SANITIZER_GO
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  CHECK(stack_top);
  CHECK(stack_bottom);
  MEMORY_BASIC_INFORMATION mbi;
  CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0);
  // FIXME: is it possible for the stack to not be a single allocation?
  // Are these values what ASan expects to get (reserved, not committed;
  // including stack guard page) ?
  *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
  *stack_bottom = (uptr)mbi.AllocationBase;
}
#endif  // #if !SANITIZER_GO

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (rv == 0)
    ReportMmapFailureAndDie(size, mem_type, "allocate",
                            GetLastError(), raw_report);
  return rv;
}

void UnmapOrDie(void *addr, uptr size) {
  if (!size || !addr)
    return;

  MEMORY_BASIC_INFORMATION mbi;
  CHECK(VirtualQuery(addr, &mbi, sizeof(mbi)));

  // MEM_RELEASE can only be used to unmap whole regions previously mapped with
  // VirtualAlloc. So we first try MEM_RELEASE since it is better, and if that
  // fails try MEM_DECOMMIT.
  if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
    if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
      Report("ERROR: %s failed to "
             "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n",
             SanitizerToolName, size, size, addr, GetLastError());
      CHECK("unable to unmap" && 0);
    }
  }
}

static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type,
                                     const char *mmap_type) {
  error_t last_error = GetLastError();
  if (last_error == ERROR_NOT_ENOUGH_MEMORY)
    return nullptr;
  ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (rv == 0)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
  return rv;
}

// We want to map a chunk of address space aligned to 'alignment'.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  // Windows will align our allocations to at least 64K.
  alignment = Max(alignment, GetMmapGranularity());

  uptr mapped_addr =
      (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (!mapped_addr)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

  // If we got it right on the first try, return. Otherwise, unmap it and go to
  // the slow path.
  if (IsAligned(mapped_addr, alignment))
    return (void*)mapped_addr;
  if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
    ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());

  // If we didn't get an aligned address, overallocate, find an aligned address,
  // unmap, and try to allocate at that aligned address.
  int retries = 0;
  const int kMaxRetries = 10;
  for (; retries < kMaxRetries &&
         (mapped_addr == 0 || !IsAligned(mapped_addr, alignment));
       retries++) {
    // Overallocate size + alignment bytes.
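    // Reserve only (no commit): this allocation exists just to discover an
    // address from which an aligned target can be computed, and it is released
    // again right below before the real allocation attempt.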
    mapped_addr =
        (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
    if (!mapped_addr)
      return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

    // Find the aligned address.
    uptr aligned_addr = RoundUpTo(mapped_addr, alignment);

    // Free the overallocation.
    if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
      ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());

    // Attempt to allocate exactly the number of bytes we need at the aligned
    // address. This may fail for a number of reasons, in which case we continue
    // the loop.
    mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size,
                                     MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  }

  // Fail if we can't make this work quickly.
  if (retries == kMaxRetries && mapped_addr == 0)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

  return (void *)mapped_addr;
}

bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
  // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
  // but on Win64 it does.
  (void)name;  // unsupported
#if !SANITIZER_GO && SANITIZER_WINDOWS64
  // On asan/Windows64, using MEM_COMMIT would result in error
  // 1455:ERROR_COMMITMENT_LIMIT.
  // Asan uses an exception handler to commit pages on demand.
  void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE);
#else
  void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT,
                         PAGE_READWRITE);
#endif
  if (p == 0) {
    Report("ERROR: %s failed to "
           "allocate %p (%zd) bytes at %p (error code: %d)\n",
           SanitizerToolName, size, size, fixed_addr, GetLastError());
    return false;
  }
  return true;
}

bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size, const char *name) {
  // FIXME: Windows supports large pages too. Might be worth checking.
  return MmapFixedNoReserve(fixed_addr, size, name);
}

// Memory space mapped by 'MmapFixedOrDie' must have been reserved by
// 'MmapFixedNoAccess'.
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name) {
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
                         MEM_COMMIT, PAGE_READWRITE);
  if (p == 0) {
    char mem_type[30];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                      fixed_addr);
    ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError());
  }
  return p;
}

// Uses fixed_addr for now.
// Will use offset instead once we've implemented this function for real.
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size, const char *name) {
  return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size));
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size,
                                    const char *name) {
  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  // Only unmap if it covers the entire range.
  CHECK((addr == reinterpret_cast<uptr>(base_)) && (size == size_));
  // We unmap the whole range, just null out the base.
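  // Note that UnmapOrDie aborts the process on failure, so no error needs to
  // be propagated from here.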
  base_ = nullptr;
  size_ = 0;
  UnmapOrDie(reinterpret_cast<void*>(addr), size);
}

void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size, const char *name) {
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
                         MEM_COMMIT, PAGE_READWRITE);
  if (p == 0) {
    char mem_type[30];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                      fixed_addr);
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
  }
  return p;
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  // FIXME: make this really NoReserve?
  return MmapOrDie(size, mem_type);
}

uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
  base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size) : MmapNoAccess(size);
  size_ = size;
  name_ = name;
  (void)os_handle_;  // unsupported
  return reinterpret_cast<uptr>(base_);
}


void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  (void)name;  // unsupported
  void *res = VirtualAlloc((LPVOID)fixed_addr, size,
                           MEM_RESERVE, PAGE_NOACCESS);
  if (res == 0)
    Report("WARNING: %s failed to "
           "mprotect %p (%zd) bytes at %p (error code: %d)\n",
           SanitizerToolName, size, size, fixed_addr, GetLastError());
  return res;
}

void *MmapNoAccess(uptr size) {
  void *res = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS);
  if (res == 0)
    Report("WARNING: %s failed to "
           "mprotect %p (%zd) bytes (error code: %d)\n",
           SanitizerToolName, size, size, GetLastError());
  return res;
}

bool MprotectNoAccess(uptr addr, uptr size) {
  DWORD old_protection;
  return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
}

void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
  uptr beg_aligned = RoundDownTo(beg, GetPageSizeCached()),
       end_aligned = RoundDownTo(end, GetPageSizeCached());
  CHECK(beg < end);                // make sure the region is sane
  if (beg_aligned == end_aligned)  // make sure we're freeing at least 1 page;
    return;
  UnmapOrDie((void *)beg, end_aligned - beg_aligned);
}

void SetShadowRegionHugePageMode(uptr addr, uptr size) {
  // FIXME: probably similar to ReleaseMemoryToOS.
}

bool DontDumpShadowMemory(uptr addr, uptr length) {
  // This is almost useless on 32-bits.
  // FIXME: add madvise-analog when we move to 64-bits.
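  // There is no Windows counterpart of MADV_DONTDUMP wired up here, so this
  // simply reports success.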
  return true;
}

uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment,
                      UNUSED uptr &high_mem_end) {
  const uptr granularity = GetMmapGranularity();
  const uptr alignment =
      Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
  const uptr left_padding =
      Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
  uptr space_size = shadow_size_bytes + left_padding;
  uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
                                               granularity, nullptr, nullptr);
  CHECK_NE((uptr)0, shadow_start);
  CHECK(IsAligned(shadow_start, alignment));
  return shadow_start;
}

uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found,
                              uptr *max_occupied_addr) {
  uptr address = 0;
  while (true) {
    MEMORY_BASIC_INFORMATION info;
    if (!::VirtualQuery((void*)address, &info, sizeof(info)))
      return 0;

    if (info.State == MEM_FREE) {
      uptr shadow_address = RoundUpTo((uptr)info.BaseAddress + left_padding,
                                      alignment);
      if (shadow_address + size < (uptr)info.BaseAddress + info.RegionSize)
        return shadow_address;
    }

    // Move to the next region.
    address = (uptr)info.BaseAddress + info.RegionSize;
  }
  return 0;
}

uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
                                uptr num_aliases, uptr ring_buffer_size) {
  CHECK(false && "HWASan aliasing is unimplemented on Windows");
  return 0;
}

bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
  MEMORY_BASIC_INFORMATION mbi;
  CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
  return mbi.Protect == PAGE_NOACCESS &&
         (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end;
}

void *MapFileToMemory(const char *file_name, uptr *buff_size) {
  UNIMPLEMENTED();
}

void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
  UNIMPLEMENTED();
}

static const int kMaxEnvNameLength = 128;
static const DWORD kMaxEnvValueLength = 32767;

namespace {

struct EnvVariable {
  char name[kMaxEnvNameLength];
  char value[kMaxEnvValueLength];
};

}  // namespace

static const int kEnvVariables = 5;
static EnvVariable env_vars[kEnvVariables];
static int num_env_vars;

const char *GetEnv(const char *name) {
  // Note: this implementation caches the values of the environment variables
  // and limits their quantity.
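  // Cached entries are never invalidated, so later changes made through
  // SetEnvironmentVariable are not observed by this function.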
  for (int i = 0; i < num_env_vars; i++) {
    if (0 == internal_strcmp(name, env_vars[i].name))
      return env_vars[i].value;
  }
  CHECK_LT(num_env_vars, kEnvVariables);
  DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value,
                                     kMaxEnvValueLength);
  if (rv > 0 && rv < kMaxEnvValueLength) {
    CHECK_LT(internal_strlen(name), kMaxEnvNameLength);
    internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength);
    num_env_vars++;
    return env_vars[num_env_vars - 1].value;
  }
  return 0;
}

const char *GetPwd() {
  UNIMPLEMENTED();
}

u32 GetUid() {
  UNIMPLEMENTED();
}

namespace {
struct ModuleInfo {
  const char *filepath;
  uptr base_address;
  uptr end_address;
};

#if !SANITIZER_GO
int CompareModulesBase(const void *pl, const void *pr) {
  const ModuleInfo *l = (const ModuleInfo *)pl, *r = (const ModuleInfo *)pr;
  if (l->base_address < r->base_address)
    return -1;
  return l->base_address > r->base_address;
}
#endif
}  // namespace

#if !SANITIZER_GO
void DumpProcessMap() {
  Report("Dumping process modules:\n");
  ListOfModules modules;
  modules.init();
  uptr num_modules = modules.size();

  InternalMmapVector<ModuleInfo> module_infos(num_modules);
  for (size_t i = 0; i < num_modules; ++i) {
    module_infos[i].filepath = modules[i].full_name();
    module_infos[i].base_address = modules[i].ranges().front()->beg;
    module_infos[i].end_address = modules[i].ranges().back()->end;
  }
  qsort(module_infos.data(), num_modules, sizeof(ModuleInfo),
        CompareModulesBase);

  for (size_t i = 0; i < num_modules; ++i) {
    const ModuleInfo &mi = module_infos[i];
    if (mi.end_address != 0) {
      Printf("\t%p-%p %s\n", mi.base_address, mi.end_address,
             mi.filepath[0] ? mi.filepath : "[no name]");
    } else if (mi.filepath[0]) {
      Printf("\t??\?-??? %s\n", mi.filepath);
    } else {
      Printf("\t???\n");
    }
  }
}
#endif

void DisableCoreDumperIfNecessary() {
  // Do nothing.
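  // Windows has no POSIX-style core dumps to disable; crash dumps are managed
  // separately by Windows Error Reporting.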
}

void ReExec() {
  UNIMPLEMENTED();
}

void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}

bool StackSizeIsUnlimited() {
  UNIMPLEMENTED();
}

void SetStackSizeLimitInBytes(uptr limit) {
  UNIMPLEMENTED();
}

bool AddressSpaceIsUnlimited() {
  UNIMPLEMENTED();
}

void SetAddressSpaceUnlimited() {
  UNIMPLEMENTED();
}

bool IsPathSeparator(const char c) {
  return c == '\\' || c == '/';
}

static bool IsAlpha(char c) {
  c = ToLower(c);
  return c >= 'a' && c <= 'z';
}

bool IsAbsolutePath(const char *path) {
  return path != nullptr && IsAlpha(path[0]) && path[1] == ':' &&
         IsPathSeparator(path[2]);
}

void internal_usleep(u64 useconds) { Sleep(useconds / 1000); }

u64 NanoTime() {
  static LARGE_INTEGER frequency = {};
  LARGE_INTEGER counter;
  if (UNLIKELY(frequency.QuadPart == 0)) {
    QueryPerformanceFrequency(&frequency);
    CHECK_NE(frequency.QuadPart, 0);
  }
  QueryPerformanceCounter(&counter);
  counter.QuadPart *= 1000ULL * 1000000ULL;
  counter.QuadPart /= frequency.QuadPart;
  return counter.QuadPart;
}

u64 MonotonicNanoTime() { return NanoTime(); }

void Abort() {
  internal__exit(3);
}

bool CreateDir(const char *pathname) { return _mkdir(pathname) == 0; }

#if !SANITIZER_GO
// Read the file to extract the ImageBase field from the PE header. If ASLR is
// disabled and this virtual address is available, the loader will typically
// load the image at this address. Therefore, we call it the preferred base. Any
// addresses in the DWARF typically assume that the object has been loaded at
// this address.
static uptr GetPreferredBase(const char *modname, char *buf, size_t buf_size) {
  fd_t fd = OpenFile(modname, RdOnly, nullptr);
  if (fd == kInvalidFd)
    return 0;
  FileCloser closer(fd);

  // Read just the DOS header.
  IMAGE_DOS_HEADER dos_header;
  uptr bytes_read;
  if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) ||
      bytes_read != sizeof(dos_header))
    return 0;

  // The file should start with the right signature.
  if (dos_header.e_magic != IMAGE_DOS_SIGNATURE)
    return 0;

  // The layout at e_lfanew is:
  // "PE\0\0"
  // IMAGE_FILE_HEADER
  // IMAGE_OPTIONAL_HEADER
  // Seek to e_lfanew and read all that data.
  if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) ==
      INVALID_SET_FILE_POINTER)
    return 0;
  if (!ReadFromFile(fd, buf, buf_size, &bytes_read) || bytes_read != buf_size)
    return 0;

  // Check for "PE\0\0" before the PE header.
  char *pe_sig = &buf[0];
  if (internal_memcmp(pe_sig, "PE\0\0", 4) != 0)
    return 0;

  // Skip over IMAGE_FILE_HEADER. We could do more validation here if we wanted.
  IMAGE_OPTIONAL_HEADER *pe_header =
      (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER));

  // Check for more magic in the PE header.
  if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC)
    return 0;

  // Finally, return the ImageBase.
  return (uptr)pe_header->ImageBase;
}

void ListOfModules::init() {
  clearOrInit();
  HANDLE cur_process = GetCurrentProcess();

  // Query the list of modules. Start by assuming there are no more than 256
  // modules and retry if that's not sufficient.
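  // EnumProcessModules reports the buffer size it actually needs in
  // |bytes_required|, which drives the retry loop below.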
  HMODULE *hmodules = 0;
  uptr modules_buffer_size = sizeof(HMODULE) * 256;
  DWORD bytes_required;
  while (!hmodules) {
    hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__);
    CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size,
                             &bytes_required));
    if (bytes_required > modules_buffer_size) {
      // Either there turned out to be more than 256 hmodules, or new hmodules
      // could have loaded since the last try. Retry.
      UnmapOrDie(hmodules, modules_buffer_size);
      hmodules = 0;
      modules_buffer_size = bytes_required;
    }
  }

  InternalMmapVector<char> buf(4 + sizeof(IMAGE_FILE_HEADER) +
                               sizeof(IMAGE_OPTIONAL_HEADER));
  InternalMmapVector<wchar_t> modname_utf16(kMaxPathLength);
  InternalMmapVector<char> module_name(kMaxPathLength);
  // |num_modules| is the number of modules actually present.
  size_t num_modules = bytes_required / sizeof(HMODULE);
  for (size_t i = 0; i < num_modules; ++i) {
    HMODULE handle = hmodules[i];
    MODULEINFO mi;
    if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi)))
      continue;

    // Get the UTF-16 path and convert to UTF-8.
    int modname_utf16_len =
        GetModuleFileNameW(handle, &modname_utf16[0], kMaxPathLength);
    if (modname_utf16_len == 0)
      modname_utf16[0] = '\0';
    int module_name_len = ::WideCharToMultiByte(
        CP_UTF8, 0, &modname_utf16[0], modname_utf16_len + 1, &module_name[0],
        kMaxPathLength, NULL, NULL);
    module_name[module_name_len] = '\0';

    uptr base_address = (uptr)mi.lpBaseOfDll;
    uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage;

    // Adjust the base address of the module so that we get a VA instead of an
    // RVA when computing the module offset. This helps llvm-symbolizer find the
    // right DWARF CU. In the common case that the image is loaded at its
    // preferred address, we will now print normal virtual addresses.
    uptr preferred_base =
        GetPreferredBase(&module_name[0], &buf[0], buf.size());
    uptr adjusted_base = base_address - preferred_base;

    modules_.push_back(LoadedModule());
    LoadedModule &cur_module = modules_.back();
    cur_module.set(&module_name[0], adjusted_base);
    // We add the whole module as one single address range.
    cur_module.addAddressRange(base_address, end_address, /*executable*/ true,
                               /*writable*/ true);
  }
  UnmapOrDie(hmodules, modules_buffer_size);
}

void ListOfModules::fallbackInit() { clear(); }

// We can't use atexit() directly at __asan_init time as the CRT is not fully
// initialized at this point. Place the functions into a vector and use
// atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers).
InternalMmapVectorNoCtor<void (*)(void)> atexit_functions;

int Atexit(void (*function)(void)) {
  atexit_functions.push_back(function);
  return 0;
}

static int RunAtexit() {
  TraceLoggingUnregister(g_asan_provider);
  int ret = 0;
  for (uptr i = 0; i < atexit_functions.size(); ++i) {
    ret |= atexit(atexit_functions[i]);
  }
  return ret;
}

#pragma section(".CRT$XID", long, read)
__declspec(allocate(".CRT$XID")) int (*__run_atexit)() = RunAtexit;
#endif

// ------------------ sanitizer_libc.h
fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) {
  // FIXME: Use the wide variants to handle Unicode filenames.
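  // Because CreateFileA is used, paths are interpreted in the process ANSI
  // code page, so filenames that are not representable there cannot be opened.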
  fd_t res;
  if (mode == RdOnly) {
    res = CreateFileA(filename, GENERIC_READ,
                      FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                      nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
  } else if (mode == WrOnly) {
    res = CreateFileA(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
                      FILE_ATTRIBUTE_NORMAL, nullptr);
  } else {
    UNIMPLEMENTED();
  }
  CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd);
  CHECK(res != kStderrFd || kStderrFd == kInvalidFd);
  if (res == kInvalidFd && last_error)
    *last_error = GetLastError();
  return res;
}

void CloseFile(fd_t fd) {
  CloseHandle(fd);
}

bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
                  error_t *error_p) {
  CHECK(fd != kInvalidFd);

  // bytes_read can't be passed directly to ReadFile:
  // uptr is unsigned long long on 64-bit Windows.
  unsigned long num_read_long;

  bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr);
  if (!success && error_p)
    *error_p = GetLastError();
  if (bytes_read)
    *bytes_read = num_read_long;
  return success;
}

bool SupportsColoredOutput(fd_t fd) {
  // FIXME: support colored output.
  return false;
}

bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
                 error_t *error_p) {
  CHECK(fd != kInvalidFd);

  // Handle null optional parameters.
  error_t dummy_error;
  error_p = error_p ? error_p : &dummy_error;
  uptr dummy_bytes_written;
  bytes_written = bytes_written ? bytes_written : &dummy_bytes_written;

  // Initialize output parameters in case we fail.
  *error_p = 0;
  *bytes_written = 0;

  // Map the conventional Unix fds 1 and 2 to Windows handles. They might be
  // closed, in which case this will fail.
  if (fd == kStdoutFd || fd == kStderrFd) {
    fd = GetStdHandle(fd == kStdoutFd ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);
    if (fd == 0) {
      *error_p = ERROR_INVALID_HANDLE;
      return false;
    }
  }

  DWORD bytes_written_32;
  if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) {
    *error_p = GetLastError();
    return false;
  } else {
    *bytes_written = bytes_written_32;
    return true;
  }
}

uptr internal_sched_yield() {
  Sleep(0);
  return 0;
}

void internal__exit(int exitcode) {
  TraceLoggingUnregister(g_asan_provider);
  // ExitProcess runs some finalizers, so use TerminateProcess to avoid that.
  // The debugger doesn't stop on TerminateProcess like it does on ExitProcess,
  // so add our own breakpoint here.
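  // Only break when a debugger is actually attached; otherwise the breakpoint
  // exception raised by __debugbreak() would itself go unhandled.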
  if (::IsDebuggerPresent())
    __debugbreak();
  TerminateProcess(GetCurrentProcess(), exitcode);
  BUILTIN_UNREACHABLE();
}

uptr internal_ftruncate(fd_t fd, uptr size) {
  UNIMPLEMENTED();
}

uptr GetRSS() {
  PROCESS_MEMORY_COUNTERS counters;
  if (!GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters)))
    return 0;
  return counters.WorkingSetSize;
}

void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
void internal_join_thread(void *th) { }

void FutexWait(atomic_uint32_t *p, u32 cmp) {
  WaitOnAddress(p, &cmp, sizeof(cmp), INFINITE);
}

void FutexWake(atomic_uint32_t *p, u32 count) {
  if (count == 1)
    WakeByAddressSingle(p);
  else
    WakeByAddressAll(p);
}

uptr GetTlsSize() {
  return 0;
}

void InitTlsSize() {
}

void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size) {
#if SANITIZER_GO
  *stk_addr = 0;
  *stk_size = 0;
  *tls_addr = 0;
  *tls_size = 0;
#else
  uptr stack_top, stack_bottom;
  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
  *stk_addr = stack_bottom;
  *stk_size = stack_top - stack_bottom;
  *tls_addr = 0;
  *tls_size = 0;
#endif
}

void ReportFile::Write(const char *buffer, uptr length) {
  SpinMutexLock l(mu);
  ReopenIfNecessary();
  if (!WriteToFile(fd, buffer, length)) {
    // stderr may be closed, but we may be able to print to the debugger
    // instead. This is the case when launching a program from Visual Studio,
    // and the following routine should write to its console.
    OutputDebugStringA(buffer);
  }
}

void SetAlternateSignalStack() {
  // FIXME: Decide what to do on Windows.
}

void UnsetAlternateSignalStack() {
  // FIXME: Decide what to do on Windows.
}

void InstallDeadlySignalHandlers(SignalHandlerType handler) {
  (void)handler;
  // FIXME: Decide what to do on Windows.
}

HandleSignalMode GetHandleSignalMode(int signum) {
  // FIXME: Decide what to do on Windows.
  return kHandleSignalNo;
}

// Check based on flags if we should handle this exception.
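// The groups below map Windows exception codes onto the closest POSIX signal
// category, so the usual handle_segv/handle_sigill/handle_sigfpe flags apply.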
bool IsHandledDeadlyException(DWORD exceptionCode) {
  switch (exceptionCode) {
    case EXCEPTION_ACCESS_VIOLATION:
    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
    case EXCEPTION_STACK_OVERFLOW:
    case EXCEPTION_DATATYPE_MISALIGNMENT:
    case EXCEPTION_IN_PAGE_ERROR:
      return common_flags()->handle_segv;
    case EXCEPTION_ILLEGAL_INSTRUCTION:
    case EXCEPTION_PRIV_INSTRUCTION:
    case EXCEPTION_BREAKPOINT:
      return common_flags()->handle_sigill;
    case EXCEPTION_FLT_DENORMAL_OPERAND:
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
    case EXCEPTION_FLT_INEXACT_RESULT:
    case EXCEPTION_FLT_INVALID_OPERATION:
    case EXCEPTION_FLT_OVERFLOW:
    case EXCEPTION_FLT_STACK_CHECK:
    case EXCEPTION_FLT_UNDERFLOW:
    case EXCEPTION_INT_DIVIDE_BY_ZERO:
    case EXCEPTION_INT_OVERFLOW:
      return common_flags()->handle_sigfpe;
  }
  return false;
}

bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  SYSTEM_INFO si;
  GetNativeSystemInfo(&si);
  uptr page_size = si.dwPageSize;
  uptr page_mask = ~(page_size - 1);

  for (uptr page = beg & page_mask, end = (beg + size - 1) & page_mask;
       page <= end;) {
    MEMORY_BASIC_INFORMATION info;
    if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info))
      return false;

    if (info.Protect == 0 || info.Protect == PAGE_NOACCESS ||
        info.Protect == PAGE_EXECUTE)
      return false;

    if (info.RegionSize == 0)
      return false;

    page += info.RegionSize;
  }

  return true;
}

bool SignalContext::IsStackOverflow() const {
  return (DWORD)GetType() == EXCEPTION_STACK_OVERFLOW;
}

void SignalContext::InitPcSpBp() {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
  CONTEXT *context_record = (CONTEXT *)context;

  pc = (uptr)exception_record->ExceptionAddress;
#ifdef _WIN64
  bp = (uptr)context_record->Rbp;
  sp = (uptr)context_record->Rsp;
#else
  bp = (uptr)context_record->Ebp;
  sp = (uptr)context_record->Esp;
#endif
}

uptr SignalContext::GetAddress() const {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
  if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
    return exception_record->ExceptionInformation[1];
  return (uptr)exception_record->ExceptionAddress;
}

bool SignalContext::IsMemoryAccess() const {
  return ((EXCEPTION_RECORD *)siginfo)->ExceptionCode ==
         EXCEPTION_ACCESS_VIOLATION;
}

bool SignalContext::IsTrueFaultingAddress() const { return true; }

SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;

  // The write flag is only available for access violation exceptions.
  if (exception_record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
    return SignalContext::UNKNOWN;

  // The contents of this array are documented at
  // https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
  // The first element indicates read as 0, write as 1, or execute as 8. The
  // second element is the faulting address.
  switch (exception_record->ExceptionInformation[0]) {
    case 0:
      return SignalContext::READ;
    case 1:
      return SignalContext::WRITE;
    case 8:
      return SignalContext::UNKNOWN;
  }
  return SignalContext::UNKNOWN;
}

void SignalContext::DumpAllRegisters(void *context) {
  // FIXME: Implement this.
}

int SignalContext::GetType() const {
  return static_cast<const EXCEPTION_RECORD *>(siginfo)->ExceptionCode;
}

const char *SignalContext::Describe() const {
  unsigned code = GetType();
  // Get the string description of the exception if this is a known deadly
  // exception.
  switch (code) {
    case EXCEPTION_ACCESS_VIOLATION:
      return "access-violation";
    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
      return "array-bounds-exceeded";
    case EXCEPTION_STACK_OVERFLOW:
      return "stack-overflow";
    case EXCEPTION_DATATYPE_MISALIGNMENT:
      return "datatype-misalignment";
    case EXCEPTION_IN_PAGE_ERROR:
      return "in-page-error";
    case EXCEPTION_ILLEGAL_INSTRUCTION:
      return "illegal-instruction";
    case EXCEPTION_PRIV_INSTRUCTION:
      return "priv-instruction";
    case EXCEPTION_BREAKPOINT:
      return "breakpoint";
    case EXCEPTION_FLT_DENORMAL_OPERAND:
      return "flt-denormal-operand";
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
      return "flt-divide-by-zero";
    case EXCEPTION_FLT_INEXACT_RESULT:
      return "flt-inexact-result";
    case EXCEPTION_FLT_INVALID_OPERATION:
      return "flt-invalid-operation";
    case EXCEPTION_FLT_OVERFLOW:
      return "flt-overflow";
    case EXCEPTION_FLT_STACK_CHECK:
      return "flt-stack-check";
    case EXCEPTION_FLT_UNDERFLOW:
      return "flt-underflow";
    case EXCEPTION_INT_DIVIDE_BY_ZERO:
      return "int-divide-by-zero";
    case EXCEPTION_INT_OVERFLOW:
      return "int-overflow";
  }
  return "unknown exception";
}

uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
  if (buf_len == 0)
    return 0;

  // Get the UTF-16 path and convert to UTF-8.
  InternalMmapVector<wchar_t> binname_utf16(kMaxPathLength);
  int binname_utf16_len =
      GetModuleFileNameW(NULL, &binname_utf16[0], kMaxPathLength);
  if (binname_utf16_len == 0) {
    buf[0] = '\0';
    return 0;
  }
  int binary_name_len =
      ::WideCharToMultiByte(CP_UTF8, 0, &binname_utf16[0], binname_utf16_len,
                            buf, buf_len, NULL, NULL);
  if ((unsigned)binary_name_len == buf_len)
    --binary_name_len;
  buf[binary_name_len] = '\0';
  return binary_name_len;
}

uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

void CheckVMASize() {
  // Do nothing.
}

void InitializePlatformEarly() {
  // Do nothing.
}

void MaybeReexec() {
  // No need to re-exec on Windows.
}

void CheckASLR() {
  // Do nothing
}

void CheckMPROTECT() {
  // Do nothing
}

char **GetArgv() {
  // FIXME: Actually implement this function.
  return 0;
}

char **GetEnviron() {
  // FIXME: Actually implement this function.
  return 0;
}

pid_t StartSubprocess(const char *program, const char *const argv[],
                      const char *const envp[], fd_t stdin_fd, fd_t stdout_fd,
                      fd_t stderr_fd) {
  // FIXME: implement on this platform.
  // Should be implemented based on
  // SymbolizerProcess::StartSymbolizerSubprocess
  // from lib/sanitizer_common/sanitizer_symbolizer_win.cpp.
  return -1;
}

bool IsProcessRunning(pid_t pid) {
  // FIXME: implement on this platform.
  return false;
}

int WaitForProcess(pid_t pid) { return -1; }

// FIXME: implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}

void CheckNoDeepBind(const char *filename, int flag) {
  // Do nothing.
}

// FIXME: implement on this platform.
bool GetRandom(void *buffer, uptr length, bool blocking) {
  UNIMPLEMENTED();
}

u32 GetNumberOfCPUs() {
  SYSTEM_INFO sysinfo = {};
  GetNativeSystemInfo(&sysinfo);
  return sysinfo.dwNumberOfProcessors;
}

#if SANITIZER_WIN_TRACE
// TODO(mcgov): Rename this project-wide to PlatformLogInit
void AndroidLogInit(void) {
  HRESULT hr = TraceLoggingRegister(g_asan_provider);
  if (!SUCCEEDED(hr))
    return;
}

void SetAbortMessage(const char *) {}

void LogFullErrorReport(const char *buffer) {
  if (common_flags()->log_to_syslog) {
    InternalMmapVector<wchar_t> filename;
    DWORD filename_length = 0;
    do {
      filename.resize(filename.size() + 0x100);
      filename_length =
          GetModuleFileNameW(NULL, filename.begin(), filename.size());
    } while (filename_length >= filename.size());
    TraceLoggingWrite(g_asan_provider, "AsanReportEvent",
                      TraceLoggingValue(filename.begin(), "ExecutableName"),
                      TraceLoggingValue(buffer, "AsanReportContents"));
  }
}
#endif  // SANITIZER_WIN_TRACE

void InitializePlatformCommonFlags(CommonFlags *cf) {}

}  // namespace __sanitizer

#endif  // SANITIZER_WINDOWS