xref: /freebsd/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp (revision 1165fc9a526630487a1feb63daef65c5aee1a583)
1 //===-- sanitizer_win.cpp -------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is shared between AddressSanitizer and ThreadSanitizer
10 // run-time libraries and implements windows-specific functions from
11 // sanitizer_libc.h.
12 //===----------------------------------------------------------------------===//
13 
14 #include "sanitizer_platform.h"
15 #if SANITIZER_WINDOWS
16 
17 #define WIN32_LEAN_AND_MEAN
18 #define NOGDI
19 #include <windows.h>
20 #include <io.h>
21 #include <psapi.h>
22 #include <stdlib.h>
23 
24 #include "sanitizer_common.h"
25 #include "sanitizer_file.h"
26 #include "sanitizer_libc.h"
27 #include "sanitizer_mutex.h"
28 #include "sanitizer_placement_new.h"
29 #include "sanitizer_win_defs.h"
30 
31 #if defined(PSAPI_VERSION) && PSAPI_VERSION == 1
32 #pragma comment(lib, "psapi")
33 #endif
34 #if SANITIZER_WIN_TRACE
35 #include <traceloggingprovider.h>
36 //  Windows trace logging provider init
37 #pragma comment(lib, "advapi32.lib")
38 TRACELOGGING_DECLARE_PROVIDER(g_asan_provider);
39 // GUID must be the same in utils/AddressSanitizerLoggingProvider.wprp
40 TRACELOGGING_DEFINE_PROVIDER(g_asan_provider, "AddressSanitizerLoggingProvider",
41                              (0x6c6c766d, 0x3846, 0x4e6a, 0xa4, 0xfb, 0x5b,
42                               0x53, 0x0b, 0xd0, 0xf3, 0xfa));
43 #else
44 #define TraceLoggingUnregister(x)
45 #endif
46 
47 // For WaitOnAddress
48 #pragma comment(lib, "synchronization.lib")
49 
50 // A macro to tell the compiler that this part of the code cannot be reached,
51 // if the compiler supports this feature. Since we're using this in
52 // code that is called when terminating the process, the expansion of the
53 // macro should not terminate the process to avoid infinite recursion.
54 #if defined(__clang__)
55 # define BUILTIN_UNREACHABLE() __builtin_unreachable()
56 #elif defined(__GNUC__) && \
57     (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
58 # define BUILTIN_UNREACHABLE() __builtin_unreachable()
59 #elif defined(_MSC_VER)
60 # define BUILTIN_UNREACHABLE() __assume(0)
61 #else
62 # define BUILTIN_UNREACHABLE()
63 #endif
64 
65 namespace __sanitizer {
66 
67 #include "sanitizer_syscall_generic.inc"
68 
69 // --------------------- sanitizer_common.h
70 uptr GetPageSize() {
71   SYSTEM_INFO si;
72   GetSystemInfo(&si);
73   return si.dwPageSize;
74 }
75 
76 uptr GetMmapGranularity() {
77   SYSTEM_INFO si;
78   GetSystemInfo(&si);
79   return si.dwAllocationGranularity;
80 }
81 
82 uptr GetMaxUserVirtualAddress() {
83   SYSTEM_INFO si;
84   GetSystemInfo(&si);
85   return (uptr)si.lpMaximumApplicationAddress;
86 }
87 
88 uptr GetMaxVirtualAddress() {
89   return GetMaxUserVirtualAddress();
90 }
91 
92 bool FileExists(const char *filename) {
93   return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES;
94 }
95 
96 uptr internal_getpid() {
97   return GetProcessId(GetCurrentProcess());
98 }
99 
100 int internal_dlinfo(void *handle, int request, void *p) {
101   UNIMPLEMENTED();
102 }
103 
104 // In contrast to POSIX, on Windows GetCurrentThreadId()
105 // returns a system-unique identifier.
106 tid_t GetTid() {
107   return GetCurrentThreadId();
108 }
109 
110 uptr GetThreadSelf() {
111   return GetTid();
112 }
113 
114 #if !SANITIZER_GO
115 void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
116                                 uptr *stack_bottom) {
117   CHECK(stack_top);
118   CHECK(stack_bottom);
119   MEMORY_BASIC_INFORMATION mbi;
120   CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0);
121   // FIXME: is it possible for the stack to not be a single allocation?
122   // Are these values what ASan expects to get (reserved, not committed;
123   // including the stack guard page)?
124   *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
125   *stack_bottom = (uptr)mbi.AllocationBase;
126 }
127 #endif  // #if !SANITIZER_GO
128 
129 void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
130   void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
131   if (rv == 0)
132     ReportMmapFailureAndDie(size, mem_type, "allocate",
133                             GetLastError(), raw_report);
134   return rv;
135 }
136 
137 void UnmapOrDie(void *addr, uptr size) {
138   if (!size || !addr)
139     return;
140 
141   MEMORY_BASIC_INFORMATION mbi;
142   CHECK(VirtualQuery(addr, &mbi, sizeof(mbi)));
143 
144   // MEM_RELEASE can only be used to unmap whole regions previously mapped with
145   // VirtualAlloc. So we first try MEM_RELEASE since it is better, and if that
146   // fails try MEM_DECOMMIT.
147   if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
148     if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
149       Report("ERROR: %s failed to "
150              "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n",
151              SanitizerToolName, size, size, addr, GetLastError());
152       CHECK("unable to unmap" && 0);
153     }
154   }
155 }
156 
157 static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type,
158                                      const char *mmap_type) {
159   error_t last_error = GetLastError();
160   if (last_error == ERROR_NOT_ENOUGH_MEMORY)
161     return nullptr;
162   ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error);
163 }
164 
165 void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
166   void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
167   if (rv == 0)
168     return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
169   return rv;
170 }
171 
172 // We want to map a chunk of address space aligned to 'alignment'.
173 void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
174                                    const char *mem_type) {
175   CHECK(IsPowerOfTwo(size));
176   CHECK(IsPowerOfTwo(alignment));
177 
178   // Windows will align our allocations to at least 64K.
179   alignment = Max(alignment, GetMmapGranularity());
180 
181   uptr mapped_addr =
182       (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
183   if (!mapped_addr)
184     return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
185 
186   // If we got it right on the first try, return. Otherwise, unmap it and go to
187   // the slow path.
188   if (IsAligned(mapped_addr, alignment))
189     return (void*)mapped_addr;
190   if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
191     ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());
192 
193   // If we didn't get an aligned address, overallocate, find an aligned address,
194   // unmap, and try to allocate at that aligned address.
195   int retries = 0;
196   const int kMaxRetries = 10;
197   for (; retries < kMaxRetries &&
198          (mapped_addr == 0 || !IsAligned(mapped_addr, alignment));
199        retries++) {
200     // Overallocate size + alignment bytes.
201     mapped_addr =
202         (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
203     if (!mapped_addr)
204       return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
205 
206     // Find the aligned address.
207     uptr aligned_addr = RoundUpTo(mapped_addr, alignment);
208 
209     // Free the overallocation.
210     if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
211       ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());
212 
213     // Attempt to allocate exactly the number of bytes we need at the aligned
214     // address. This may fail for a number of reasons, in which case we continue
215     // the loop.
216     mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size,
217                                      MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
218   }
219 
220   // Fail if we can't make this work quickly.
221   if (retries == kMaxRetries && mapped_addr == 0)
222     return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
223 
224   return (void *)mapped_addr;
225 }
226 
227 bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
228   // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
229   // but on Win64 it does.
230   (void)name;  // unsupported
231 #if !SANITIZER_GO && SANITIZER_WINDOWS64
232   // On ASan/Windows64, using MEM_COMMIT would result in error
233   // 1455 (ERROR_COMMITMENT_LIMIT).
234   // ASan uses an exception handler to commit pages on demand.
235   void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE);
236 #else
237   void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT,
238                          PAGE_READWRITE);
239 #endif
240   if (p == 0) {
241     Report("ERROR: %s failed to "
242            "allocate 0x%zx (%zd) bytes at %p (error code: %d)\n",
243            SanitizerToolName, size, size, (void *)fixed_addr, GetLastError());
244     return false;
245   }
246   return true;
247 }
248 
249 bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size, const char *name) {
250   // FIXME: Windows supports large pages too. Might be worth checking.
251   return MmapFixedNoReserve(fixed_addr, size, name);
252 }
253 
254 // Memory space mapped by 'MmapFixedOrDie' must have been reserved by
255 // 'MmapFixedNoAccess'.
256 void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name) {
257   void *p = VirtualAlloc((LPVOID)fixed_addr, size,
258       MEM_COMMIT, PAGE_READWRITE);
259   if (p == 0) {
260     char mem_type[30];
261     internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
262                       fixed_addr);
263     ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError());
264   }
265   return p;
266 }
267 
268 // Uses fixed_addr for now.
269 // Will use offset instead once we've implemented this function for real.
270 uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size, const char *name) {
271   return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size));
272 }
273 
274 uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size,
275                                     const char *name) {
276   return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));
277 }
278 
279 void ReservedAddressRange::Unmap(uptr addr, uptr size) {
280   // Only unmap if it covers the entire range.
281   CHECK((addr == reinterpret_cast<uptr>(base_)) && (size == size_));
282   // We are unmapping the whole range, so just null out the base and size.
283   base_ = nullptr;
284   size_ = 0;
285   UnmapOrDie(reinterpret_cast<void*>(addr), size);
286 }
287 
288 void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size, const char *name) {
289   void *p = VirtualAlloc((LPVOID)fixed_addr, size,
290       MEM_COMMIT, PAGE_READWRITE);
291   if (p == 0) {
292     char mem_type[30];
293     internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
294                       fixed_addr);
295     return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
296   }
297   return p;
298 }
299 
300 void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
301   // FIXME: make this really NoReserve?
302   return MmapOrDie(size, mem_type);
303 }
304 
305 uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
306   base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size) : MmapNoAccess(size);
307   size_ = size;
308   name_ = name;
309   (void)os_handle_;  // unsupported
310   return reinterpret_cast<uptr>(base_);
311 }
312 
313 
314 void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
315   (void)name; // unsupported
316   void *res = VirtualAlloc((LPVOID)fixed_addr, size,
317                            MEM_RESERVE, PAGE_NOACCESS);
318   if (res == 0)
319     Report("WARNING: %s failed to "
320            "mprotect 0x%zx (%zd) bytes at %p (error code: %d)\n",
321            SanitizerToolName, size, size, (void *)fixed_addr, GetLastError());
322   return res;
323 }
324 
325 void *MmapNoAccess(uptr size) {
326   void *res = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS);
327   if (res == 0)
328     Report("WARNING: %s failed to "
329            "mprotect 0x%zx (%zd) bytes (error code: %d)\n",
330            SanitizerToolName, size, size, GetLastError());
331   return res;
332 }
333 
334 bool MprotectNoAccess(uptr addr, uptr size) {
335   DWORD old_protection;
336   return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
337 }
338 
339 bool MprotectReadOnly(uptr addr, uptr size) {
340   DWORD old_protection;
341   return VirtualProtect((LPVOID)addr, size, PAGE_READONLY, &old_protection);
342 }
343 
344 void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
345   uptr beg_aligned = RoundDownTo(beg, GetPageSizeCached()),
346        end_aligned = RoundDownTo(end, GetPageSizeCached());
347   CHECK(beg < end);                // make sure the region is sane
348   if (beg_aligned == end_aligned)  // make sure we're freeing at least 1 page
349     return;
350   UnmapOrDie((void *)beg, end_aligned - beg_aligned);
351 }
352 
353 void SetShadowRegionHugePageMode(uptr addr, uptr size) {
354   // FIXME: probably similar to ReleaseMemoryToOS.
355 }
356 
357 bool DontDumpShadowMemory(uptr addr, uptr length) {
358   // This is almost useless on 32-bit targets.
359   // FIXME: add a madvise analog when we move to 64 bits.
360   return true;
361 }
362 
363 uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
364                       uptr min_shadow_base_alignment,
365                       UNUSED uptr &high_mem_end) {
366   const uptr granularity = GetMmapGranularity();
367   const uptr alignment =
368       Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
369   const uptr left_padding =
370       Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
371   uptr space_size = shadow_size_bytes + left_padding;
372   uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
373                                                granularity, nullptr, nullptr);
374   CHECK_NE((uptr)0, shadow_start);
375   CHECK(IsAligned(shadow_start, alignment));
376   return shadow_start;
377 }
378 
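// Scan the address space with VirtualQuery, looking for a MEM_FREE region that
// can hold |size| bytes at |alignment| after |left_padding| bytes of padding.
// Returns 0 if no such region is found; |largest_gap_found| and
// |max_occupied_addr| are not computed on Windows.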
379 uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
380                               uptr *largest_gap_found,
381                               uptr *max_occupied_addr) {
382   uptr address = 0;
383   while (true) {
384     MEMORY_BASIC_INFORMATION info;
385     if (!::VirtualQuery((void*)address, &info, sizeof(info)))
386       return 0;
387 
388     if (info.State == MEM_FREE) {
389       uptr shadow_address = RoundUpTo((uptr)info.BaseAddress + left_padding,
390                                       alignment);
391       if (shadow_address + size < (uptr)info.BaseAddress + info.RegionSize)
392         return shadow_address;
393     }
394 
395     // Move to the next region.
396     address = (uptr)info.BaseAddress + info.RegionSize;
397   }
398   return 0;
399 }
400 
401 uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
402                                 uptr num_aliases, uptr ring_buffer_size) {
403   CHECK(false && "HWASan aliasing is unimplemented on Windows");
404   return 0;
405 }
406 
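// A range is considered "available" here if it lies entirely within a single
// PAGE_NOACCESS region, i.e. address space previously reserved with
// MmapFixedNoAccess/MmapNoAccess and not yet committed with another protection.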
407 bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
408   MEMORY_BASIC_INFORMATION mbi;
409   CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
410   return mbi.Protect == PAGE_NOACCESS &&
411          (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end;
412 }
413 
414 void *MapFileToMemory(const char *file_name, uptr *buff_size) {
415   UNIMPLEMENTED();
416 }
417 
418 void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
419   UNIMPLEMENTED();
420 }
421 
422 static const int kMaxEnvNameLength = 128;
423 static const DWORD kMaxEnvValueLength = 32767;
424 
425 namespace {
426 
427 struct EnvVariable {
428   char name[kMaxEnvNameLength];
429   char value[kMaxEnvValueLength];
430 };
431 
432 }  // namespace
433 
434 static const int kEnvVariables = 5;
435 static EnvVariable env_vars[kEnvVariables];
436 static int num_env_vars;
437 
438 const char *GetEnv(const char *name) {
439   // Note: this implementation caches the values of the environment variables
440   // and limits their quantity.
441   for (int i = 0; i < num_env_vars; i++) {
442     if (0 == internal_strcmp(name, env_vars[i].name))
443       return env_vars[i].value;
444   }
445   CHECK_LT(num_env_vars, kEnvVariables);
446   DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value,
447                                      kMaxEnvValueLength);
448   if (rv > 0 && rv < kMaxEnvValueLength) {
449     CHECK_LT(internal_strlen(name), kMaxEnvNameLength);
450     internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength);
451     num_env_vars++;
452     return env_vars[num_env_vars - 1].value;
453   }
454   return 0;
455 }
456 
457 const char *GetPwd() {
458   UNIMPLEMENTED();
459 }
460 
461 u32 GetUid() {
462   UNIMPLEMENTED();
463 }
464 
465 namespace {
466 struct ModuleInfo {
467   const char *filepath;
468   uptr base_address;
469   uptr end_address;
470 };
471 
472 #if !SANITIZER_GO
473 int CompareModulesBase(const void *pl, const void *pr) {
474   const ModuleInfo *l = (const ModuleInfo *)pl, *r = (const ModuleInfo *)pr;
475   if (l->base_address < r->base_address)
476     return -1;
477   return l->base_address > r->base_address;
478 }
479 #endif
480 }  // namespace
481 
482 #if !SANITIZER_GO
483 void DumpProcessMap() {
484   Report("Dumping process modules:\n");
485   ListOfModules modules;
486   modules.init();
487   uptr num_modules = modules.size();
488 
489   InternalMmapVector<ModuleInfo> module_infos(num_modules);
490   for (size_t i = 0; i < num_modules; ++i) {
491     module_infos[i].filepath = modules[i].full_name();
492     module_infos[i].base_address = modules[i].ranges().front()->beg;
493     module_infos[i].end_address = modules[i].ranges().back()->end;
494   }
495   qsort(module_infos.data(), num_modules, sizeof(ModuleInfo),
496         CompareModulesBase);
497 
498   for (size_t i = 0; i < num_modules; ++i) {
499     const ModuleInfo &mi = module_infos[i];
500     if (mi.end_address != 0) {
501       Printf("\t%p-%p %s\n", mi.base_address, mi.end_address,
502              mi.filepath[0] ? mi.filepath : "[no name]");
503     } else if (mi.filepath[0]) {
504       Printf("\t??\?-??? %s\n", mi.filepath);
505     } else {
506       Printf("\t???\n");
507     }
508   }
509 }
510 #endif
511 
512 void DisableCoreDumperIfNecessary() {
513   // Do nothing.
514 }
515 
516 void ReExec() {
517   UNIMPLEMENTED();
518 }
519 
520 void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
521 
522 bool StackSizeIsUnlimited() {
523   UNIMPLEMENTED();
524 }
525 
526 void SetStackSizeLimitInBytes(uptr limit) {
527   UNIMPLEMENTED();
528 }
529 
530 bool AddressSpaceIsUnlimited() {
531   UNIMPLEMENTED();
532 }
533 
534 void SetAddressSpaceUnlimited() {
535   UNIMPLEMENTED();
536 }
537 
538 bool IsPathSeparator(const char c) {
539   return c == '\\' || c == '/';
540 }
541 
542 static bool IsAlpha(char c) {
543   c = ToLower(c);
544   return c >= 'a' && c <= 'z';
545 }
546 
547 bool IsAbsolutePath(const char *path) {
548   return path != nullptr && IsAlpha(path[0]) && path[1] == ':' &&
549          IsPathSeparator(path[2]);
550 }
551 
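// Note: Sleep() has millisecond granularity, so sub-millisecond waits degrade
// to Sleep(0), which just yields the remainder of the time slice.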
552 void internal_usleep(u64 useconds) { Sleep(useconds / 1000); }
553 
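// Nanosecond clock based on QueryPerformanceCounter; ticks are converted to
// nanoseconds using the cached QueryPerformanceFrequency value.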
554 u64 NanoTime() {
555   static LARGE_INTEGER frequency = {};
556   LARGE_INTEGER counter;
557   if (UNLIKELY(frequency.QuadPart == 0)) {
558     QueryPerformanceFrequency(&frequency);
559     CHECK_NE(frequency.QuadPart, 0);
560   }
561   QueryPerformanceCounter(&counter);
562   counter.QuadPart *= 1000ULL * 1000000ULL;
563   counter.QuadPart /= frequency.QuadPart;
564   return counter.QuadPart;
565 }
566 
567 u64 MonotonicNanoTime() { return NanoTime(); }
568 
569 void Abort() {
570   internal__exit(3);
571 }
572 
573 bool CreateDir(const char *pathname) {
574   return CreateDirectoryA(pathname, nullptr) != 0;
575 }
576 
577 #if !SANITIZER_GO
578 // Read the file to extract the ImageBase field from the PE header. If ASLR is
579 // disabled and this virtual address is available, the loader will typically
580 // load the image at this address. Therefore, we call it the preferred base. Any
581 // addresses in the DWARF typically assume that the object has been loaded at
582 // this address.
583 static uptr GetPreferredBase(const char *modname, char *buf, size_t buf_size) {
584   fd_t fd = OpenFile(modname, RdOnly, nullptr);
585   if (fd == kInvalidFd)
586     return 0;
587   FileCloser closer(fd);
588 
589   // Read just the DOS header.
590   IMAGE_DOS_HEADER dos_header;
591   uptr bytes_read;
592   if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) ||
593       bytes_read != sizeof(dos_header))
594     return 0;
595 
596   // The file should start with the right signature.
597   if (dos_header.e_magic != IMAGE_DOS_SIGNATURE)
598     return 0;
599 
600   // The layout at e_lfanew is:
601   // "PE\0\0"
602   // IMAGE_FILE_HEADER
603   // IMAGE_OPTIONAL_HEADER
604   // Seek to e_lfanew and read all that data.
605   if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) ==
606       INVALID_SET_FILE_POINTER)
607     return 0;
608   if (!ReadFromFile(fd, buf, buf_size, &bytes_read) || bytes_read != buf_size)
609     return 0;
610 
611   // Check for "PE\0\0" before the PE header.
612   char *pe_sig = &buf[0];
613   if (internal_memcmp(pe_sig, "PE\0\0", 4) != 0)
614     return 0;
615 
616   // Skip over IMAGE_FILE_HEADER. We could do more validation here if we wanted.
617   IMAGE_OPTIONAL_HEADER *pe_header =
618       (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER));
619 
620   // Check for more magic in the PE header.
621   if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC)
622     return 0;
623 
624   // Finally, return the ImageBase.
625   return (uptr)pe_header->ImageBase;
626 }
627 
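// Enumerate the modules loaded into the current process with
// EnumProcessModules and record each one, adjusting its base by the preferred
// base read from the PE header (see the comment on adjusted_base below).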
628 void ListOfModules::init() {
629   clearOrInit();
630   HANDLE cur_process = GetCurrentProcess();
631 
632   // Query the list of modules.  Start by assuming there are no more than 256
633   // modules and retry if that's not sufficient.
634   HMODULE *hmodules = 0;
635   uptr modules_buffer_size = sizeof(HMODULE) * 256;
636   DWORD bytes_required;
637   while (!hmodules) {
638     hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__);
639     CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size,
640                              &bytes_required));
641     if (bytes_required > modules_buffer_size) {
642       // Either there turned out to be more than 256 hmodules, or new hmodules
643       // could have loaded since the last try.  Retry.
644       UnmapOrDie(hmodules, modules_buffer_size);
645       hmodules = 0;
646       modules_buffer_size = bytes_required;
647     }
648   }
649 
650   InternalMmapVector<char> buf(4 + sizeof(IMAGE_FILE_HEADER) +
651                                sizeof(IMAGE_OPTIONAL_HEADER));
652   InternalMmapVector<wchar_t> modname_utf16(kMaxPathLength);
653   InternalMmapVector<char> module_name(kMaxPathLength);
654   // |num_modules| is the number of modules actually present.
655   size_t num_modules = bytes_required / sizeof(HMODULE);
656   for (size_t i = 0; i < num_modules; ++i) {
657     HMODULE handle = hmodules[i];
658     MODULEINFO mi;
659     if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi)))
660       continue;
661 
662     // Get the UTF-16 path and convert to UTF-8.
663     int modname_utf16_len =
664         GetModuleFileNameW(handle, &modname_utf16[0], kMaxPathLength);
665     if (modname_utf16_len == 0)
666       modname_utf16[0] = '\0';
667     int module_name_len = ::WideCharToMultiByte(
668         CP_UTF8, 0, &modname_utf16[0], modname_utf16_len + 1, &module_name[0],
669         kMaxPathLength, NULL, NULL);
670     module_name[module_name_len] = '\0';
671 
672     uptr base_address = (uptr)mi.lpBaseOfDll;
673     uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage;
674 
675     // Adjust the base address of the module so that we get a VA instead of an
676     // RVA when computing the module offset. This helps llvm-symbolizer find the
677     // right DWARF CU. In the common case that the image is loaded at its
678     // preferred address, we will now print normal virtual addresses.
679     uptr preferred_base =
680         GetPreferredBase(&module_name[0], &buf[0], buf.size());
681     uptr adjusted_base = base_address - preferred_base;
682 
683     modules_.push_back(LoadedModule());
684     LoadedModule &cur_module = modules_.back();
685     cur_module.set(&module_name[0], adjusted_base);
686     // We add the whole module as one single address range.
687     cur_module.addAddressRange(base_address, end_address, /*executable*/ true,
688                                /*writable*/ true);
689   }
690   UnmapOrDie(hmodules, modules_buffer_size);
691 }
692 
693 void ListOfModules::fallbackInit() { clear(); }
694 
695 // We can't use atexit() directly at __asan_init time as the CRT is not fully
696 // initialized at this point.  Place the functions into a vector and use
697 // atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers).
698 InternalMmapVectorNoCtor<void (*)(void)> atexit_functions;
699 
700 int Atexit(void (*function)(void)) {
701   atexit_functions.push_back(function);
702   return 0;
703 }
704 
705 static int RunAtexit() {
706   TraceLoggingUnregister(g_asan_provider);
707   int ret = 0;
708   for (uptr i = 0; i < atexit_functions.size(); ++i) {
709     ret |= atexit(atexit_functions[i]);
710   }
711   return ret;
712 }
713 
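// Register RunAtexit as a CRT initializer. The .CRT$XID section is processed
// after .CRT$XIC, so by the time RunAtexit executes, the CRT's own atexit()
// machinery is ready (see the comment above Atexit()).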
714 #pragma section(".CRT$XID", long, read)
715 __declspec(allocate(".CRT$XID")) int (*__run_atexit)() = RunAtexit;
716 #endif
717 
718 // ------------------ sanitizer_libc.h
719 fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) {
720   // FIXME: Use the wide variants to handle Unicode filenames.
721   fd_t res;
722   if (mode == RdOnly) {
723     res = CreateFileA(filename, GENERIC_READ,
724                       FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
725                       nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
726   } else if (mode == WrOnly) {
727     res = CreateFileA(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
728                       FILE_ATTRIBUTE_NORMAL, nullptr);
729   } else {
730     UNIMPLEMENTED();
731   }
732   CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd);
733   CHECK(res != kStderrFd || kStderrFd == kInvalidFd);
734   if (res == kInvalidFd && last_error)
735     *last_error = GetLastError();
736   return res;
737 }
738 
739 void CloseFile(fd_t fd) {
740   CloseHandle(fd);
741 }
742 
743 bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
744                   error_t *error_p) {
745   CHECK(fd != kInvalidFd);
746 
747   // bytes_read can't be passed directly to ReadFile:
748   // uptr is unsigned long long on 64-bit Windows.
749   unsigned long num_read_long;
750 
751   bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr);
752   if (!success && error_p)
753     *error_p = GetLastError();
754   if (bytes_read)
755     *bytes_read = num_read_long;
756   return success;
757 }
758 
759 bool SupportsColoredOutput(fd_t fd) {
760   // FIXME: support colored output.
761   return false;
762 }
763 
764 bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
765                  error_t *error_p) {
766   CHECK(fd != kInvalidFd);
767 
768   // Handle null optional parameters.
769   error_t dummy_error;
770   error_p = error_p ? error_p : &dummy_error;
771   uptr dummy_bytes_written;
772   bytes_written = bytes_written ? bytes_written : &dummy_bytes_written;
773 
774   // Initialize output parameters in case we fail.
775   *error_p = 0;
776   *bytes_written = 0;
777 
778   // Map the conventional Unix fds 1 and 2 to Windows handles. They might be
779   // closed, in which case this will fail.
780   if (fd == kStdoutFd || fd == kStderrFd) {
781     fd = GetStdHandle(fd == kStdoutFd ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);
782     if (fd == 0) {
783       *error_p = ERROR_INVALID_HANDLE;
784       return false;
785     }
786   }
787 
788   DWORD bytes_written_32;
789   if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) {
790     *error_p = GetLastError();
791     return false;
792   } else {
793     *bytes_written = bytes_written_32;
794     return true;
795   }
796 }
797 
798 uptr internal_sched_yield() {
799   Sleep(0);
800   return 0;
801 }
802 
803 void internal__exit(int exitcode) {
804   TraceLoggingUnregister(g_asan_provider);
805   // ExitProcess runs some finalizers, so use TerminateProcess to avoid that.
806   // The debugger doesn't stop on TerminateProcess like it does on ExitProcess,
807   // so add our own breakpoint here.
808   if (::IsDebuggerPresent())
809     __debugbreak();
810   TerminateProcess(GetCurrentProcess(), exitcode);
811   BUILTIN_UNREACHABLE();
812 }
813 
814 uptr internal_ftruncate(fd_t fd, uptr size) {
815   UNIMPLEMENTED();
816 }
817 
818 uptr GetRSS() {
819   PROCESS_MEMORY_COUNTERS counters;
820   if (!GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters)))
821     return 0;
822   return counters.WorkingSetSize;
823 }
824 
825 void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
826 void internal_join_thread(void *th) { }
827 
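// FutexWait/FutexWake are implemented with the WaitOnAddress/WakeByAddress*
// APIs from synchronization.lib (see the pragma near the top of this file).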
828 void FutexWait(atomic_uint32_t *p, u32 cmp) {
829   WaitOnAddress(p, &cmp, sizeof(cmp), INFINITE);
830 }
831 
832 void FutexWake(atomic_uint32_t *p, u32 count) {
833   if (count == 1)
834     WakeByAddressSingle(p);
835   else
836     WakeByAddressAll(p);
837 }
838 
839 uptr GetTlsSize() {
840   return 0;
841 }
842 
843 void InitTlsSize() {
844 }
845 
846 void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
847                           uptr *tls_addr, uptr *tls_size) {
848 #if SANITIZER_GO
849   *stk_addr = 0;
850   *stk_size = 0;
851   *tls_addr = 0;
852   *tls_size = 0;
853 #else
854   uptr stack_top, stack_bottom;
855   GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
856   *stk_addr = stack_bottom;
857   *stk_size = stack_top - stack_bottom;
858   *tls_addr = 0;
859   *tls_size = 0;
860 #endif
861 }
862 
863 void ReportFile::Write(const char *buffer, uptr length) {
864   SpinMutexLock l(mu);
865   ReopenIfNecessary();
866   if (!WriteToFile(fd, buffer, length)) {
867     // stderr may be closed, but we may be able to print to the debugger
868     // instead.  This is the case when launching a program from Visual Studio,
869     // and the following routine should write to its console.
870     OutputDebugStringA(buffer);
871   }
872 }
873 
874 void SetAlternateSignalStack() {
875   // FIXME: Decide what to do on Windows.
876 }
877 
878 void UnsetAlternateSignalStack() {
879   // FIXME: Decide what to do on Windows.
880 }
881 
882 void InstallDeadlySignalHandlers(SignalHandlerType handler) {
883   (void)handler;
884   // FIXME: Decide what to do on Windows.
885 }
886 
887 HandleSignalMode GetHandleSignalMode(int signum) {
888   // FIXME: Decide what to do on Windows.
889   return kHandleSignalNo;
890 }
891 
892 // Check based on flags if we should handle this exception.
893 bool IsHandledDeadlyException(DWORD exceptionCode) {
894   switch (exceptionCode) {
895     case EXCEPTION_ACCESS_VIOLATION:
896     case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
897     case EXCEPTION_STACK_OVERFLOW:
898     case EXCEPTION_DATATYPE_MISALIGNMENT:
899     case EXCEPTION_IN_PAGE_ERROR:
900       return common_flags()->handle_segv;
901     case EXCEPTION_ILLEGAL_INSTRUCTION:
902     case EXCEPTION_PRIV_INSTRUCTION:
903     case EXCEPTION_BREAKPOINT:
904       return common_flags()->handle_sigill;
905     case EXCEPTION_FLT_DENORMAL_OPERAND:
906     case EXCEPTION_FLT_DIVIDE_BY_ZERO:
907     case EXCEPTION_FLT_INEXACT_RESULT:
908     case EXCEPTION_FLT_INVALID_OPERATION:
909     case EXCEPTION_FLT_OVERFLOW:
910     case EXCEPTION_FLT_STACK_CHECK:
911     case EXCEPTION_FLT_UNDERFLOW:
912     case EXCEPTION_INT_DIVIDE_BY_ZERO:
913     case EXCEPTION_INT_OVERFLOW:
914       return common_flags()->handle_sigfpe;
915   }
916   return false;
917 }
918 
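// Walk the regions returned by VirtualQuery for [beg, beg+size) and treat the
// range as accessible only if every page is mapped with a readable protection
// (i.e. not unmapped, PAGE_NOACCESS, or execute-only).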
919 bool IsAccessibleMemoryRange(uptr beg, uptr size) {
920   SYSTEM_INFO si;
921   GetNativeSystemInfo(&si);
922   uptr page_size = si.dwPageSize;
923   uptr page_mask = ~(page_size - 1);
924 
925   for (uptr page = beg & page_mask, end = (beg + size - 1) & page_mask;
926        page <= end;) {
927     MEMORY_BASIC_INFORMATION info;
928     if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info))
929       return false;
930 
931     if (info.Protect == 0 || info.Protect == PAGE_NOACCESS ||
932         info.Protect == PAGE_EXECUTE)
933       return false;
934 
935     if (info.RegionSize == 0)
936       return false;
937 
938     page += info.RegionSize;
939   }
940 
941   return true;
942 }
943 
944 bool SignalContext::IsStackOverflow() const {
945   return (DWORD)GetType() == EXCEPTION_STACK_OVERFLOW;
946 }
947 
948 void SignalContext::InitPcSpBp() {
949   EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
950   CONTEXT *context_record = (CONTEXT *)context;
951 
952   pc = (uptr)exception_record->ExceptionAddress;
953 #  if SANITIZER_WINDOWS64
954 #    if SANITIZER_ARM64
955   bp = (uptr)context_record->Fp;
956   sp = (uptr)context_record->Sp;
957 #    else
958   bp = (uptr)context_record->Rbp;
959   sp = (uptr)context_record->Rsp;
960 #    endif
961 #  else
962   bp = (uptr)context_record->Ebp;
963   sp = (uptr)context_record->Esp;
964 #  endif
965 }
966 
967 uptr SignalContext::GetAddress() const {
968   EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
969   if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
970     return exception_record->ExceptionInformation[1];
971   return (uptr)exception_record->ExceptionAddress;
972 }
973 
974 bool SignalContext::IsMemoryAccess() const {
975   return ((EXCEPTION_RECORD *)siginfo)->ExceptionCode ==
976          EXCEPTION_ACCESS_VIOLATION;
977 }
978 
979 bool SignalContext::IsTrueFaultingAddress() const { return true; }
980 
981 SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
982   EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
983 
984   // The write flag is only available for access violation exceptions.
985   if (exception_record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
986     return SignalContext::Unknown;
987 
988   // The contents of this array are documented at
989   // https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
990   // The first element indicates read as 0, write as 1, or execute as 8.  The
991   // second element is the faulting address.
992   switch (exception_record->ExceptionInformation[0]) {
993     case 0:
994       return SignalContext::Read;
995     case 1:
996       return SignalContext::Write;
997     case 8:
998       return SignalContext::Unknown;
999   }
1000   return SignalContext::Unknown;
1001 }
1002 
1003 void SignalContext::DumpAllRegisters(void *context) {
1004   // FIXME: Implement this.
1005 }
1006 
1007 int SignalContext::GetType() const {
1008   return static_cast<const EXCEPTION_RECORD *>(siginfo)->ExceptionCode;
1009 }
1010 
1011 const char *SignalContext::Describe() const {
1012   unsigned code = GetType();
1013   // Get the string description of the exception if this is a known deadly
1014   // exception.
1015   switch (code) {
1016     case EXCEPTION_ACCESS_VIOLATION:
1017       return "access-violation";
1018     case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
1019       return "array-bounds-exceeded";
1020     case EXCEPTION_STACK_OVERFLOW:
1021       return "stack-overflow";
1022     case EXCEPTION_DATATYPE_MISALIGNMENT:
1023       return "datatype-misalignment";
1024     case EXCEPTION_IN_PAGE_ERROR:
1025       return "in-page-error";
1026     case EXCEPTION_ILLEGAL_INSTRUCTION:
1027       return "illegal-instruction";
1028     case EXCEPTION_PRIV_INSTRUCTION:
1029       return "priv-instruction";
1030     case EXCEPTION_BREAKPOINT:
1031       return "breakpoint";
1032     case EXCEPTION_FLT_DENORMAL_OPERAND:
1033       return "flt-denormal-operand";
1034     case EXCEPTION_FLT_DIVIDE_BY_ZERO:
1035       return "flt-divide-by-zero";
1036     case EXCEPTION_FLT_INEXACT_RESULT:
1037       return "flt-inexact-result";
1038     case EXCEPTION_FLT_INVALID_OPERATION:
1039       return "flt-invalid-operation";
1040     case EXCEPTION_FLT_OVERFLOW:
1041       return "flt-overflow";
1042     case EXCEPTION_FLT_STACK_CHECK:
1043       return "flt-stack-check";
1044     case EXCEPTION_FLT_UNDERFLOW:
1045       return "flt-underflow";
1046     case EXCEPTION_INT_DIVIDE_BY_ZERO:
1047       return "int-divide-by-zero";
1048     case EXCEPTION_INT_OVERFLOW:
1049       return "int-overflow";
1050   }
1051   return "unknown exception";
1052 }
1053 
1054 uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
1055   if (buf_len == 0)
1056     return 0;
1057 
1058   // Get the UTF-16 path and convert to UTF-8.
1059   InternalMmapVector<wchar_t> binname_utf16(kMaxPathLength);
1060   int binname_utf16_len =
1061       GetModuleFileNameW(NULL, &binname_utf16[0], kMaxPathLength);
1062   if (binname_utf16_len == 0) {
1063     buf[0] = '\0';
1064     return 0;
1065   }
1066   int binary_name_len =
1067       ::WideCharToMultiByte(CP_UTF8, 0, &binname_utf16[0], binname_utf16_len,
1068                             buf, buf_len, NULL, NULL);
1069   if ((unsigned)binary_name_len == buf_len)
1070     --binary_name_len;
1071   buf[binary_name_len] = '\0';
1072   return binary_name_len;
1073 }
1074 
1075 uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
1076   return ReadBinaryName(buf, buf_len);
1077 }
1078 
1079 void CheckVMASize() {
1080   // Do nothing.
1081 }
1082 
1083 void InitializePlatformEarly() {
1084   // Do nothing.
1085 }
1086 
1087 void MaybeReexec() {
1088   // No need to re-exec on Windows.
1089 }
1090 
1091 void CheckASLR() {
1092   // Do nothing
1093 }
1094 
1095 void CheckMPROTECT() {
1096   // Do nothing
1097 }
1098 
1099 char **GetArgv() {
1100   // FIXME: Actually implement this function.
1101   return 0;
1102 }
1103 
1104 char **GetEnviron() {
1105   // FIXME: Actually implement this function.
1106   return 0;
1107 }
1108 
1109 pid_t StartSubprocess(const char *program, const char *const argv[],
1110                       const char *const envp[], fd_t stdin_fd, fd_t stdout_fd,
1111                       fd_t stderr_fd) {
1112   // FIXME: implement on this platform
1113   // Should be implemented based on
1114   // SymbolizerProcess::StartSymbolizerSubprocess
1115   // from lib/sanitizer_common/sanitizer_symbolizer_win.cpp.
1116   return -1;
1117 }
1118 
1119 bool IsProcessRunning(pid_t pid) {
1120   // FIXME: implement on this platform.
1121   return false;
1122 }
1123 
1124 int WaitForProcess(pid_t pid) { return -1; }
1125 
1126 // FIXME implement on this platform.
1127 void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}
1128 
1129 void CheckNoDeepBind(const char *filename, int flag) {
1130   // Do nothing.
1131 }
1132 
1133 // FIXME: implement on this platform.
1134 bool GetRandom(void *buffer, uptr length, bool blocking) {
1135   UNIMPLEMENTED();
1136 }
1137 
1138 u32 GetNumberOfCPUs() {
1139   SYSTEM_INFO sysinfo = {};
1140   GetNativeSystemInfo(&sysinfo);
1141   return sysinfo.dwNumberOfProcessors;
1142 }
1143 
1144 #if SANITIZER_WIN_TRACE
1145 // TODO(mcgov): Rename this project-wide to PlatformLogInit
1146 void AndroidLogInit(void) {
1147   HRESULT hr = TraceLoggingRegister(g_asan_provider);
1148   if (!SUCCEEDED(hr))
1149     return;
1150 }
1151 
1152 void SetAbortMessage(const char *) {}
1153 
1154 void LogFullErrorReport(const char *buffer) {
1155   if (common_flags()->log_to_syslog) {
1156     InternalMmapVector<wchar_t> filename;
1157     DWORD filename_length = 0;
1158     do {
1159       filename.resize(filename.size() + 0x100);
1160       filename_length =
1161           GetModuleFileNameW(NULL, filename.begin(), filename.size());
1162     } while (filename_length >= filename.size());
1163     TraceLoggingWrite(g_asan_provider, "AsanReportEvent",
1164                       TraceLoggingValue(filename.begin(), "ExecutableName"),
1165                       TraceLoggingValue(buffer, "AsanReportContents"));
1166   }
1167 }
1168 #endif // SANITIZER_WIN_TRACE
1169 
1170 void InitializePlatformCommonFlags(CommonFlags *cf) {}
1171 
1172 }  // namespace __sanitizer
1173 
1174 #endif  // SANITIZER_WINDOWS
1175