//=-- lsan_common_fuchsia.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===---------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality. Fuchsia-specific code.
//
//===---------------------------------------------------------------------===//

#include "lsan_common.h"
#include "sanitizer_common/sanitizer_platform.h"

#if CAN_SANITIZE_LEAKS && SANITIZER_FUCHSIA
#include <zircon/sanitizer.h>

#include "lsan_allocator.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stoptheworld_fuchsia.h"
#include "sanitizer_common/sanitizer_thread_registry.h"

// Ensure that the Zircon system ABI is linked in.
#pragma comment(lib, "zircon")

namespace __lsan {

void InitializePlatformSpecificModules() {}

LoadedModule *GetLinker() { return nullptr; }

__attribute__((tls_model("initial-exec"))) THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }
void DisableInThisThread() { disable_counter++; }
void EnableInThisThread() {
  if (disable_counter == 0) {
    DisableCounterUnderflow();
  }
  disable_counter--;
}

// There is nothing left to do after the globals callbacks.
void ProcessGlobalRegions(Frontier *frontier) {}

// Nothing to do here.
void ProcessPlatformSpecificAllocations(Frontier *frontier) {}

// On Fuchsia, we can intercept _Exit gracefully, and return a failing exit
// code if required at that point. Calling Die() here is undefined
// behavior and causes rare race conditions.
void HandleLeaks() {}

int ExitHook(int status) {
  return status == 0 && HasReportedLeaks() ? common_flags()->exitcode : status;
}

void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
                              CheckForLeaksParam *argument) {
  LockThreadRegistry();
  LockAllocator();

  struct Params {
    InternalMmapVector<uptr> allocator_caches;
    StopTheWorldCallback callback;
    CheckForLeaksParam *argument;
  } params = {{}, callback, argument};

  // Callback from libc for globals (data/bss modulo relro), when enabled.
  auto globals = +[](void *chunk, size_t size, void *data) {
    auto params = static_cast<const Params *>(data);
    uptr begin = reinterpret_cast<uptr>(chunk);
    uptr end = begin + size;
    ScanGlobalRange(begin, end, &params->argument->frontier);
  };

  // Callback from libc for thread stacks.
  auto stacks = +[](void *chunk, size_t size, void *data) {
    auto params = static_cast<const Params *>(data);
    uptr begin = reinterpret_cast<uptr>(chunk);
    uptr end = begin + size;
    ScanRangeForPointers(begin, end, &params->argument->frontier, "STACK",
                         kReachable);
  };

  // Callback from libc for thread registers.
  auto registers = +[](void *chunk, size_t size, void *data) {
    auto params = static_cast<const Params *>(data);
    uptr begin = reinterpret_cast<uptr>(chunk);
    uptr end = begin + size;
    ScanRangeForPointers(begin, end, &params->argument->frontier, "REGISTERS",
                         kReachable);
  };

  if (flags()->use_tls) {
    // Collect the allocator cache range from each thread so these
    // can all be excluded from the reported TLS ranges.
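    // The caches are sorted so the TLS callback below can use
    // InternalLowerBound to locate a per-thread allocator cache that lies
    // inside each reported TLS range and skip over it.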
    GetAllThreadAllocatorCachesLocked(&params.allocator_caches);
    __sanitizer::Sort(params.allocator_caches.data(),
                      params.allocator_caches.size());
  }

  // Callback from libc for TLS regions. This includes thread_local
  // variables as well as C11 tss_set and POSIX pthread_setspecific.
  auto tls = +[](void *chunk, size_t size, void *data) {
    auto params = static_cast<const Params *>(data);
    uptr begin = reinterpret_cast<uptr>(chunk);
    uptr end = begin + size;
    auto i = __sanitizer::InternalLowerBound(params->allocator_caches, begin);
    if (i < params->allocator_caches.size() &&
        params->allocator_caches[i] >= begin &&
        end - params->allocator_caches[i] <= sizeof(AllocatorCache)) {
      // Split the range in two and omit the allocator cache within.
      ScanRangeForPointers(begin, params->allocator_caches[i],
                           &params->argument->frontier, "TLS", kReachable);
      uptr begin2 = params->allocator_caches[i] + sizeof(AllocatorCache);
      ScanRangeForPointers(begin2, end, &params->argument->frontier, "TLS",
                           kReachable);
    } else {
      ScanRangeForPointers(begin, end, &params->argument->frontier, "TLS",
                           kReachable);
    }
  };

  // This stops the world and then makes callbacks for various memory regions.
  // The final callback is the last thing before the world starts up again.
  __sanitizer_memory_snapshot(
      flags()->use_globals ? globals : nullptr,
      flags()->use_stacks ? stacks : nullptr,
      flags()->use_registers ? registers : nullptr,
      flags()->use_tls ? tls : nullptr,
      [](zx_status_t, void *data) {
        auto params = static_cast<const Params *>(data);

        // We don't use the thread registry at all for enumerating the threads
        // and their stacks, registers, and TLS regions. So use it separately
        // just for the allocator cache, and to call ForEachExtraStackRange,
        // which ASan needs.
        if (flags()->use_stacks) {
          GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
              [](ThreadContextBase *tctx, void *arg) {
                ForEachExtraStackRange(tctx->os_id, ForEachExtraStackRangeCb,
                                       arg);
              },
              &params->argument->frontier);
        }

        params->callback(SuspendedThreadsListFuchsia(), params->argument);
      },
      &params);

  UnlockAllocator();
  UnlockThreadRegistry();
}

}  // namespace __lsan

// This is declared (in extern "C") by <zircon/sanitizer.h>.
// _Exit calls this directly to intercept and change the status value.
int __sanitizer_process_exit_hook(int status) {
  return __lsan::ExitHook(status);
}

#endif