//===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
/// FreeBSD-specific code.
///
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

#include "hwasan.h"
#include "hwasan_dynamic_shadow.h"
#include "hwasan_interface_internal.h"
#include "hwasan_mapping.h"
#include "hwasan_report.h"
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"

#include <dlfcn.h>
#include <elf.h>
#include <errno.h>
#include <link.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <unistd.h>
#include <unwind.h>

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_procmaps.h"

// Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
//
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=OFF
//   Not currently tested.
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=ON
//   Integration tests downstream exist.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=OFF
//   Tested with check-hwasan on x86_64-linux.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
//   Tested with check-hwasan on aarch64-linux-android.
#if !SANITIZER_ANDROID
SANITIZER_INTERFACE_ATTRIBUTE
THREADLOCAL uptr __hwasan_tls;
#endif

namespace __hwasan {

// With the zero shadow base we can not actually map pages starting from 0.
// This constant is somewhat arbitrary.
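// A rough sketch of the constraint, using the mapping from hwasan_mapping.h
// (kShadowScale == 4, i.e. one shadow byte per 16-byte granule):
//
//   MemToShadow(p) == (p >> kShadowScale) + __hwasan_shadow_memory_dynamic_address
//
// If a gap that must be protected starts at address 0, the kernel will not
// let us map pages there, even PROT_NONE ones; __sanitizer::ProtectGap
// handles this by retrying page by page, giving up only once it gets past
// kZeroBaseMaxShadowStart.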
constexpr uptr kZeroBaseShadowStart = 0;
constexpr uptr kZeroBaseMaxShadowStart = 1 << 18;

static void ProtectGap(uptr addr, uptr size) {
  __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
                          kZeroBaseMaxShadowStart);
}

uptr kLowMemStart;
uptr kLowMemEnd;
uptr kLowShadowEnd;
uptr kLowShadowStart;
uptr kHighShadowStart;
uptr kHighShadowEnd;
uptr kHighMemStart;
uptr kHighMemEnd;

static void PrintRange(uptr start, uptr end, const char *name) {
  Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
}

static void PrintAddressSpaceLayout() {
  PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
  if (kHighShadowEnd + 1 < kHighMemStart)
    PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
  else
    CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
  PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
  if (kLowShadowEnd + 1 < kHighShadowStart)
    PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowShadowEnd + 1, kHighShadowStart);
  PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
  if (kLowMemEnd + 1 < kLowShadowStart)
    PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
  PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
  CHECK_EQ(0, kLowMemStart);
}

static uptr GetHighMemEnd() {
  // HighMem covers the upper part of the address space.
  uptr max_address = GetMaxUserVirtualAddress();
  // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
  // properly aligned:
  max_address |= (GetMmapGranularity() << kShadowScale) - 1;
  return max_address;
}

static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
  __hwasan_shadow_memory_dynamic_address =
      FindDynamicShadowStart(shadow_size_bytes);
}

void InitPrctl() {
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
  // Check we're running on a kernel that can use the tagged address ABI.
  if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) == (uptr)-1 &&
      errno == EINVAL) {
#if SANITIZER_ANDROID
    // Some older Android kernels have the tagged pointer ABI on
    // unconditionally, and hence don't have the tagged-addr prctl while still
    // allowing the ABI.
    // If targeting Android and the prctl is not around we assume this is the
    // case.
    return;
#else
    Printf(
        "FATAL: "
        "HWAddressSanitizer requires a kernel with tagged address ABI.\n");
    Die();
#endif
  }

  // Turn on the tagged address ABI.
  if (internal_prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) ==
          (uptr)-1 ||
      !internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0)) {
    Printf(
        "FATAL: HWAddressSanitizer failed to enable tagged address syscall "
        "ABI.\nCheck the `sysctl abi.tagged_addr_disabled` configuration.\n");
    Die();
  }
#undef PR_SET_TAGGED_ADDR_CTRL
#undef PR_GET_TAGGED_ADDR_CTRL
#undef PR_TAGGED_ADDR_ENABLE
}

bool InitShadow() {
  // Define the entire memory range.
  kHighMemEnd = GetHighMemEnd();

  // Determine shadow memory base offset.
  InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd));

  // Place the low memory first.
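  // (For orientation, the layout being built, from low to high addresses; a
  // rough sketch, since the exact boundaries depend on where
  // FindDynamicShadowStart placed the shadow base:
  //    LowMem      [kLowMemStart,     kLowMemEnd]
  //    LowShadow   [kLowShadowStart,  kLowShadowEnd]
  //    HighShadow  [kHighShadowStart, kHighShadowEnd]  (after a protected gap)
  //    HighMem     [kHighMemStart,    kHighMemEnd]     (after a protected gap)
  // PrintAddressSpaceLayout() above prints the actual ranges.)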
  kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1;
  kLowMemStart = 0;

  // Define the low shadow based on the already placed low memory.
  kLowShadowEnd = MemToShadow(kLowMemEnd);
  kLowShadowStart = __hwasan_shadow_memory_dynamic_address;

  // High shadow takes whatever memory is left up there (making sure it is not
  // interfering with low memory in the fixed case).
  kHighShadowEnd = MemToShadow(kHighMemEnd);
  kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1;

  // High memory starts where allocated shadow allows.
  kHighMemStart = ShadowToMem(kHighShadowStart);

  // Check the sanity of the defined memory ranges (there might be gaps).
  CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
  CHECK_GT(kHighMemStart, kHighShadowEnd);
  CHECK_GT(kHighShadowEnd, kHighShadowStart);
  CHECK_GT(kHighShadowStart, kLowMemEnd);
  CHECK_GT(kLowMemEnd, kLowMemStart);
  CHECK_GT(kLowShadowEnd, kLowShadowStart);
  CHECK_GT(kLowShadowStart, kLowMemEnd);

  if (Verbosity())
    PrintAddressSpaceLayout();

  // Reserve shadow memory.
  ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
  ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");

  // Protect all the gaps.
  ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
  if (kLowMemEnd + 1 < kLowShadowStart)
    ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
  if (kLowShadowEnd + 1 < kHighShadowStart)
    ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
  if (kHighShadowEnd + 1 < kHighMemStart)
    ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);

  return true;
}

void InitThreads() {
  CHECK(__hwasan_shadow_memory_dynamic_address);
  uptr guard_page_size = GetMmapGranularity();
  uptr thread_space_start =
      __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment);
  uptr thread_space_end =
      __hwasan_shadow_memory_dynamic_address - guard_page_size;
  ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
                           "hwasan threads", /*madvise_shadow*/ false);
  ProtectGap(thread_space_end,
             __hwasan_shadow_memory_dynamic_address - thread_space_end);
  InitThreadList(thread_space_start, thread_space_end - thread_space_start);
}

bool MemIsApp(uptr p) {
  CHECK(GetTagFromPointer(p) == 0);
  return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd);
}

static void HwasanAtExit(void) {
  if (common_flags()->print_module_map)
    DumpProcessMap();
  if (flags()->print_stats && (flags()->atexit || hwasan_report_count > 0))
    ReportStats();
  if (hwasan_report_count > 0) {
    // ReportAtExitStatistics();
    if (common_flags()->exitcode)
      internal__exit(common_flags()->exitcode);
  }
}

void InstallAtExitHandler() {
  atexit(HwasanAtExit);
}

// ---------------------- TSD ---------------- {{{1

extern "C" void __hwasan_thread_enter() {
  hwasanThreadList().CreateCurrentThread()->InitRandomState();
}

extern "C" void __hwasan_thread_exit() {
  Thread *t = GetCurrentThread();
  // Make sure that a signal handler cannot see a stale current thread pointer.
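  // (atomic_signal_fence is a compiler-only fence: it orders this thread's
  // memory accesses with respect to a signal handler running on the same
  // thread, without emitting a hardware barrier instruction.)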
  atomic_signal_fence(memory_order_seq_cst);
  if (t)
    hwasanThreadList().ReleaseThread(t);
}

#if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

void HwasanTSDThreadInit() {
  if (tsd_key_inited)
    CHECK_EQ(0, pthread_setspecific(tsd_key,
                                    (void *)GetPthreadDestructorIterations()));
}

void HwasanTSDDtor(void *tsd) {
  uptr iterations = (uptr)tsd;
  if (iterations > 1) {
    CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1)));
    return;
  }
  __hwasan_thread_exit();
}

void HwasanTSDInit() {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
}
#else
void HwasanTSDInit() {}
void HwasanTSDThreadInit() {}
#endif

#if SANITIZER_ANDROID
uptr *GetCurrentThreadLongPtr() {
  return (uptr *)get_android_tls_ptr();
}
#else
uptr *GetCurrentThreadLongPtr() {
  return &__hwasan_tls;
}
#endif

#if SANITIZER_ANDROID
void AndroidTestTlsSlot() {
  uptr kMagicValue = 0x010203040A0B0C0D;
  uptr *tls_ptr = GetCurrentThreadLongPtr();
  // Store a magic value in the slot, call dlerror() (which incompatible
  // Android versions implement using this same TLS slot), and check that the
  // value survived.
  uptr old_value = *tls_ptr;
  *tls_ptr = kMagicValue;
  dlerror();
  if (*(uptr *)get_android_tls_ptr() != kMagicValue) {
    Printf(
        "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used "
        "for dlerror().\n");
    Die();
  }
  *tls_ptr = old_value;
}
#else
void AndroidTestTlsSlot() {}
#endif

Thread *GetCurrentThread() {
  uptr *ThreadLongPtr = GetCurrentThreadLongPtr();
  if (UNLIKELY(*ThreadLongPtr == 0))
    return nullptr;
  auto *R = (StackAllocationsRingBuffer *)ThreadLongPtr;
  return hwasanThreadList().GetThreadByBufferAddress((uptr)R->Next());
}

struct AccessInfo {
  uptr addr;
  uptr size;
  bool is_store;
  bool is_load;
  bool recover;
};

static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
  // Access type is passed in a platform dependent way (see below) and encoded
  // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is
  // recoverable. Valid values of Y are 0 to 4, which are interpreted as
  // log2(access_size), and 0xF, which means that access size is passed via
  // platform dependent register (see below).
#if defined(__aarch64__)
  // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
  // access size is stored in X1 register. Access address is always in X0
  // register.
  uptr pc = (uptr)info->si_addr;
  const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
  if ((code & 0xff00) != 0x900)
    return AccessInfo{};  // Not ours.

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.regs[0];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not ours.
  const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;

#elif defined(__x86_64__)
  // Access type is encoded in the instruction following INT3 as
  // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // RSI register. Access address is always in RDI register.
  uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
  uint8_t *nop = (uint8_t *)pc;
  if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
      *(nop + 3) < 0x40)
    return AccessInfo{};  // Not ours.
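  // The bytes checked above are "0f 1f 40 XX", i.e. nopl XX(%rax) with an
  // 8-bit displacement XX = 0x40 + 0xXY. The 0x40 base does not overlap any
  // of the bits decoded below (0x10 store, 0x20 recover, 0x0f size), so the
  // displacement byte can be used directly.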
  const unsigned code = *(nop + 3);

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not ours.
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;

#else
# error Unsupported architecture
#endif

  return AccessInfo{addr, size, is_store, !is_store, recover};
}

static void HandleTagMismatch(AccessInfo ai, uptr pc, uptr frame,
                              ucontext_t *uc, uptr *registers_frame = nullptr) {
  InternalMmapVector<BufferedStackTrace> stack_buffer(1);
  BufferedStackTrace *stack = stack_buffer.data();
  stack->Reset();
  stack->Unwind(pc, frame, uc, common_flags()->fast_unwind_on_fatal);

  // The second stack frame contains the failure __hwasan_check function, as
  // we have a stack frame for the registers saved in __hwasan_tag_mismatch
  // that we wish to ignore. This (currently) only occurs on AArch64, as x64
  // implementations use SIGTRAP to implement the failure, and thus do not go
  // through the stack saver.
  if (registers_frame && stack->trace && stack->size > 0) {
    stack->trace++;
    stack->size--;
  }

  bool fatal = flags()->halt_on_error || !ai.recover;
  ReportTagMismatch(stack, ai.addr, ai.size, ai.is_store, fatal,
                    registers_frame);
}

static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
  AccessInfo ai = GetAccessInfo(info, uc);
  if (!ai.is_store && !ai.is_load)
    return false;

  SignalContext sig{info, uc};
  HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);

#if defined(__aarch64__)
  // On AArch64 the BRK instruction does not advance the PC, so skip past it
  // to let execution continue in recoverable mode. On x86_64, INT3 has
  // already advanced RIP, so there is nothing to do.
  uc->uc_mcontext.pc += 4;
#elif defined(__x86_64__)
#else
# error Unsupported architecture
#endif
  return true;
}

static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

void HwasanOnDeadlySignal(int signo, void *info, void *context) {
  // Probably a tag mismatch.
  if (signo == SIGTRAP)
    if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t *)context))
      return;

  HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
}

} // namespace __hwasan

// Entry point for interoperability between __hwasan_tag_mismatch (ASM) and the
// rest of the mismatch handling code (C++).
void __hwasan_tag_mismatch4(uptr addr, uptr access_info, uptr *registers_frame,
                            size_t outsize) {
  __hwasan::AccessInfo ai;
  ai.is_store = access_info & 0x10;
  ai.is_load = !ai.is_store;
  ai.recover = access_info & 0x20;
  ai.addr = addr;
  if ((access_info & 0xf) == 0xf)
    ai.size = outsize;
  else
    ai.size = 1 << (access_info & 0xf);

  __hwasan::HandleTagMismatch(ai, (uptr)__builtin_return_address(0),
                              (uptr)__builtin_frame_address(0), nullptr,
                              registers_frame);
  __builtin_unreachable();
}

#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD