//===-- sanitizer_linux_libcdep.cpp ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements linux-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"

#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
    SANITIZER_SOLARIS

#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_freebsd.h"
#include "sanitizer_getauxval.h"
#include "sanitizer_glibc_version.h"
#include "sanitizer_linux.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_solaris.h"

#if SANITIZER_NETBSD
#define _RTLD_SOURCE  // for __lwp_gettcb_fast() / __lwp_getprivate_fast()
#endif

#include <dlfcn.h>  // for dlsym()
#include <link.h>
#include <pthread.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <syslog.h>

#if !defined(ElfW)
#define ElfW(type) Elf_##type
#endif

#if SANITIZER_FREEBSD
#include <pthread_np.h>
#include <stdlib.h>
#include <osreldate.h>
#include <sys/auxv.h>
#include <sys/sysctl.h>
#define pthread_getattr_np pthread_attr_get_np
// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
// that, it was never implemented. So just define it to zero.
#undef MAP_NORESERVE
#define MAP_NORESERVE 0
extern const Elf_Auxinfo *__elf_aux_vector;
#endif

#if SANITIZER_NETBSD
#include <sys/sysctl.h>
#include <sys/tls.h>
#include <lwp.h>
#endif

#if SANITIZER_SOLARIS
#include <stddef.h>
#include <stdlib.h>
#include <thread.h>
#endif

#if SANITIZER_ANDROID
#include <android/api-level.h>
#if !defined(CPU_COUNT) && !defined(__aarch64__)
#include <dirent.h>
#include <fcntl.h>
// Minimal getdents(2) record layout, used only when the NDK's sched.h lacks
// cpu_set_t (see GetNumberOfCPUs below).
struct __sanitizer::linux_dirent {
  long d_ino;
  off_t d_off;
  unsigned short d_reclen;
  char d_name[];
};
#endif
#endif

#if !SANITIZER_ANDROID
#include <elf.h>
#include <unistd.h>
#endif

namespace __sanitizer {

SANITIZER_WEAK_ATTRIBUTE int
real_sigaction(int signum, const void *act, void *oldact);

// Installs a signal handler. Prefers real_sigaction (the uninstrumented
// libc entry point, provided weakly by the interceptor runtime) and falls
// back to the libc sigaction otherwise.
int internal_sigaction(int signum, const void *act, void *oldact) {
#if !SANITIZER_GO
  if (&real_sigaction)
    return real_sigaction(signum, act, oldact);
#endif
  return sigaction(signum, (const struct sigaction *)act,
                   (struct sigaction *)oldact);
}

// Reports the current thread's stack as [*stack_bottom, *stack_top).
// At initialization (main thread) the bounds are derived from /proc maps and
// RLIMIT_STACK because libpthread may not be usable yet; otherwise they come
// from pthread_getattr_np (or thr_stksegment on Solaris).
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  CHECK(stack_top);
  CHECK(stack_bottom);
  if (at_initialization) {
    // This is the main thread. Libpthread may not be initialized yet.
    struct rlimit rl;
    CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);

    // Find the mapping that contains a stack variable.
    MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
    if (proc_maps.Error()) {
      *stack_top = *stack_bottom = 0;
      return;
    }
    MemoryMappedSegment segment;
    uptr prev_end = 0;
    // Walk mappings in address order; stop at the first segment whose end is
    // above the address of the local `rl` — that segment is the stack.
    while (proc_maps.Next(&segment)) {
      if ((uptr)&rl < segment.end)
        break;
      prev_end = segment.end;
    }
    CHECK((uptr)&rl >= segment.start && (uptr)&rl < segment.end);

    // Get stacksize from rlimit, but clip it so that it does not overlap
    // with other mappings.
    uptr stacksize = rl.rlim_cur;
    if (stacksize > segment.end - prev_end)
      stacksize = segment.end - prev_end;
    // When running with unlimited stack size, we still want to set some limit.
    // The unlimited stack size is caused by 'ulimit -s unlimited'.
    // Also, for some reason, GNU make spawns subprocesses with unlimited stack.
    if (stacksize > kMaxThreadStackSize)
      stacksize = kMaxThreadStackSize;
    *stack_top = segment.end;
    *stack_bottom = segment.end - stacksize;
    return;
  }
  uptr stacksize = 0;
  void *stackaddr = nullptr;
#if SANITIZER_SOLARIS
  stack_t ss;
  CHECK_EQ(thr_stksegment(&ss), 0);
  stacksize = ss.ss_size;
  stackaddr = (char *)ss.ss_sp - stacksize;
#else  // !SANITIZER_SOLARIS
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  my_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
  pthread_attr_destroy(&attr);
#endif  // SANITIZER_SOLARIS

  *stack_top = (uptr)stackaddr + stacksize;
  *stack_bottom = (uptr)stackaddr;
}

#if !SANITIZER_GO
// Sets environment variable `name` to `value` (overwriting) by calling the
// next `setenv` in the lookup chain, bypassing any interceptor of that name.
// Returns true on success, false if setenv could not be resolved or failed.
bool SetEnv(const char *name, const char *value) {
  void *f = dlsym(RTLD_NEXT, "setenv");
  if (!f)
    return false;
  typedef int (*setenv_ft)(const char *name, const char *value, int overwrite);
  setenv_ft setenv_f;
  CHECK_EQ(sizeof(setenv_f), sizeof(f));
  // memcpy instead of a cast to avoid object-pointer -> function-pointer
  // conversion, which is not valid ISO C++.
  internal_memcpy(&setenv_f, &f, sizeof(f));
  return setenv_f(name, value, 1) == 0;
}
#endif

// Parses the glibc version string from confstr(_CS_GNU_LIBC_VERSION) into
// major/minor/patch. Returns false when not running on glibc (or when the
// string does not fit / has an unexpected prefix).
__attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,
                                                   int *patch) {
#ifdef _CS_GNU_LIBC_VERSION
  char buf[64];
  uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));
  if (len >= sizeof(buf))
    return false;
  buf[len] = 0;
  static const char kGLibC[] = "glibc ";
  if (internal_strncmp(buf, kGLibC, sizeof(kGLibC) - 1) != 0)
    return false;
  const char *p = buf + sizeof(kGLibC) - 1;
  *major = internal_simple_strtoll(p, &p, 10);
  // Missing components default to 0 (e.g. "glibc 2.34" -> patch 0).
  *minor = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;
  *patch = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;
  return true;
#else
  return false;
#endif
}

// True if we can use dlpi_tls_data. glibc before 2.25 may leave NULL (BZ
// #19826) so dlpi_tls_data cannot be used.
//
// musl before 1.2.3 and FreeBSD as of 12.2 incorrectly set dlpi_tls_data to
// the TLS initialization image
// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=254774
__attribute__((unused)) static int g_use_dlpi_tls_data;

#if SANITIZER_GLIBC && !SANITIZER_GO
__attribute__((unused)) static size_t g_tls_size;
// Caches the static TLS size (via the private _dl_get_tls_static_info) and
// decides whether dlpi_tls_data is trustworthy on this glibc.
void InitTlsSize() {
  int major, minor, patch;
  g_use_dlpi_tls_data =
      GetLibcVersion(&major, &minor, &patch) && major == 2 && minor >= 25;

#if defined(__aarch64__) || defined(__x86_64__) || defined(__powerpc64__) || \
    defined(__loongarch__)
  void *get_tls_static_info = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
  size_t tls_align;
  ((void (*)(size_t *, size_t *))get_tls_static_info)(&g_tls_size, &tls_align);
#endif
}
#else
void InitTlsSize() { }
#endif  // SANITIZER_GLIBC && !SANITIZER_GO

// On glibc x86_64, ThreadDescriptorSize() needs to be precise due to the usage
// of g_tls_size. On other targets, ThreadDescriptorSize() is only used by lsan
// to get the pointer to thread-specific data keys in the thread control block.
#if (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS) && \
    !SANITIZER_ANDROID && !SANITIZER_GO
// sizeof(struct pthread) from glibc.
static atomic_uintptr_t thread_descriptor_size;

// Fallback table of sizeof(struct pthread) keyed on the runtime glibc
// version, used when _thread_db_sizeof_pthread is not exported (glibc < 2.34).
// Returns 0 when the version cannot be determined on table-driven targets.
static uptr ThreadDescriptorSizeFallback() {
  uptr val = 0;
#if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
  int major;
  int minor;
  int patch;
  if (GetLibcVersion(&major, &minor, &patch) && major == 2) {
    /* sizeof(struct pthread) values from various glibc versions. */
    if (SANITIZER_X32)
      val = 1728;  // Assume only one particular version for x32.
    // For ARM sizeof(struct pthread) changed in Glibc 2.23.
    else if (SANITIZER_ARM)
      val = minor <= 22 ? 1120 : 1216;
    else if (minor <= 3)
      val = FIRST_32_SECOND_64(1104, 1696);
    else if (minor == 4)
      val = FIRST_32_SECOND_64(1120, 1728);
    else if (minor == 5)
      val = FIRST_32_SECOND_64(1136, 1728);
    else if (minor <= 9)
      val = FIRST_32_SECOND_64(1136, 1712);
    else if (minor == 10)
      val = FIRST_32_SECOND_64(1168, 1776);
    else if (minor == 11 || (minor == 12 && patch == 1))
      val = FIRST_32_SECOND_64(1168, 2288);
    else if (minor <= 14)
      val = FIRST_32_SECOND_64(1168, 2304);
    else if (minor < 32)  // Unknown version
      val = FIRST_32_SECOND_64(1216, 2304);
    else  // minor == 32
      val = FIRST_32_SECOND_64(1344, 2496);
  }
#elif defined(__s390__) || defined(__sparc__)
  // The size of a prefix of TCB including pthread::{specific_1stblock,specific}
  // suffices. Just return offsetof(struct pthread, specific_used), which hasn't
  // changed since 2007-05. Technically this applies to i386/x86_64 as well but
  // we call _dl_get_tls_static_info and need the precise size of struct
  // pthread.
  return FIRST_32_SECOND_64(524, 1552);
#elif defined(__mips__)
  // TODO(sagarthakur): add more values as per different glibc versions.
  val = FIRST_32_SECOND_64(1152, 1776);
#elif SANITIZER_LOONGARCH64
  val = 1856;  // from glibc 2.36
#elif SANITIZER_RISCV64
  int major;
  int minor;
  int patch;
  if (GetLibcVersion(&major, &minor, &patch) && major == 2) {
    // TODO: consider adding an optional runtime check for an unknown (untested)
    // glibc version
    if (minor <= 28)  // WARNING: the highest tested version is 2.29
      val = 1772;     // no guarantees for this one
    else if (minor <= 31)
      val = 1772;  // tested against glibc 2.29, 2.31
    else
      val = 1936;  // tested against glibc 2.32
  }

#elif defined(__aarch64__)
  // The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22.
  val = 1776;
#elif defined(__powerpc64__)
  val = 1776;  // from glibc.ppc64le 2.20-8.fc21
#endif
  return val;
}

// Returns sizeof(struct pthread), preferring the authoritative
// _thread_db_sizeof_pthread symbol and caching the result (benign race: all
// writers store the same value).
uptr ThreadDescriptorSize() {
  uptr val = atomic_load_relaxed(&thread_descriptor_size);
  if (val)
    return val;
  // _thread_db_sizeof_pthread is a GLIBC_PRIVATE symbol that is exported in
  // glibc 2.34 and later.
  if (unsigned *psizeof = static_cast<unsigned *>(
          dlsym(RTLD_DEFAULT, "_thread_db_sizeof_pthread")))
    val = *psizeof;
  if (!val)
    val = ThreadDescriptorSizeFallback();
  atomic_store_relaxed(&thread_descriptor_size, val);
  return val;
}

#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64 || \
    SANITIZER_LOONGARCH64
// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
// head structure. It lies before the static tls blocks.
static uptr TlsPreTcbSize() {
#if defined(__mips__)
  const uptr kTcbHead = 16;  // sizeof (tcbhead_t)
#elif defined(__powerpc64__)
  const uptr kTcbHead = 88;  // sizeof (tcbhead_t)
#elif SANITIZER_RISCV64
  const uptr kTcbHead = 16;  // sizeof (tcbhead_t)
#elif SANITIZER_LOONGARCH64
  const uptr kTcbHead = 16;  // sizeof (tcbhead_t)
#endif
  const uptr kTlsAlign = 16;
  const uptr kTlsPreTcbSize =
      RoundUpTo(ThreadDescriptorSize() + kTcbHead, kTlsAlign);
  return kTlsPreTcbSize;
}
#endif

namespace {
// One static TLS block as reported by dl_iterate_phdr, ordered by address.
struct TlsBlock {
  uptr begin, end, align;
  size_t tls_modid;
  bool operator<(const TlsBlock &rhs) const { return begin < rhs.begin; }
};
}  // namespace

#ifdef __s390__
extern "C" uptr __tls_get_offset(void *arg);

static uptr TlsGetOffset(uptr ti_module, uptr ti_offset) {
  // The __tls_get_offset ABI requires %r12 to point to GOT and %r2 to be an
  // offset of a struct tls_index inside GOT. We don't possess either of the
  // two, so violate the letter of the "ELF Handling For Thread-Local
  // Storage" document and assume that the implementation just dereferences
  // %r2 + %r12.
  uptr tls_index[2] = {ti_module, ti_offset};
  register uptr r2 asm("2") = 0;
  register void *r12 asm("12") = tls_index;
  asm("basr %%r14, %[__tls_get_offset]"
      : "+r"(r2)
      : [__tls_get_offset] "r"(__tls_get_offset), "r"(r12)
      : "memory", "cc", "0", "1", "3", "4", "5", "14");
  return r2;
}
#else
extern "C" void *__tls_get_addr(size_t *);
#endif

// Module id of the main executable's TLS block (1 on most implementations;
// determined at runtime on Solaris).
static size_t main_tls_modid;

// dl_iterate_phdr callback: appends the PT_TLS block of each module (at or
// above main_tls_modid) to the InternalMmapVector<TlsBlock> passed via *data.
static int CollectStaticTlsBlocks(struct dl_phdr_info *info, size_t size,
                                  void *data) {
  size_t tls_modid;
#if SANITIZER_SOLARIS
  // dlpi_tls_modid is only available since Solaris 11.4 SRU 10. Use
  // dlinfo(RTLD_DI_LINKMAP) instead which works on all of Solaris 11.3,
  // 11.4, and Illumos. The tlsmodid of the executable was changed to 1 in
  // 11.4 to match other implementations.
  if (size >= offsetof(dl_phdr_info_test, dlpi_tls_modid))
    main_tls_modid = 1;
  else
    main_tls_modid = 0;
  g_use_dlpi_tls_data = 0;
  Rt_map *map;
  dlinfo(RTLD_SELF, RTLD_DI_LINKMAP, &map);
  tls_modid = map->rt_tlsmodid;
#else
  main_tls_modid = 1;
  tls_modid = info->dlpi_tls_modid;
#endif

  if (tls_modid < main_tls_modid)
    return 0;
  uptr begin;
#if !SANITIZER_SOLARIS
  begin = (uptr)info->dlpi_tls_data;
#endif
  if (!g_use_dlpi_tls_data) {
    // Call __tls_get_addr as a fallback. This forces TLS allocation on glibc
    // and FreeBSD.
#ifdef __s390__
    begin = (uptr)__builtin_thread_pointer() + TlsGetOffset(tls_modid, 0);
#else
    size_t mod_and_off[2] = {tls_modid, 0};
    begin = (uptr)__tls_get_addr(mod_and_off);
#endif
  }
  for (unsigned i = 0; i != info->dlpi_phnum; ++i)
    if (info->dlpi_phdr[i].p_type == PT_TLS) {
      static_cast<InternalMmapVector<TlsBlock> *>(data)->push_back(
          TlsBlock{begin, begin + info->dlpi_phdr[i].p_memsz,
                   info->dlpi_phdr[i].p_align, tls_modid});
      break;
    }
  return 0;
}

// Computes the contiguous static TLS boundary [*addr, *addr + *size) and its
// maximum alignment by collecting all PT_TLS blocks and growing outward from
// the main executable's block while neighboring blocks stay "consecutive".
__attribute__((unused)) static void GetStaticTlsBoundary(uptr *addr, uptr *size,
                                                         uptr *align) {
  InternalMmapVector<TlsBlock> ranges;
  dl_iterate_phdr(CollectStaticTlsBlocks, &ranges);
  uptr len = ranges.size();
  Sort(ranges.begin(), len);
  // Find the range with tls_modid == main_tls_modid. For glibc, because
  // libc.so uses PT_TLS, this module is guaranteed to exist and is one of
  // the initially loaded modules.
  uptr one = 0;
  while (one != len && ranges[one].tls_modid != main_tls_modid) ++one;
  if (one == len) {
    // This may happen with musl if no module uses PT_TLS.
    *addr = 0;
    *size = 0;
    *align = 1;
    return;
  }
  // Find the maximum consecutive ranges. We consider two modules consecutive if
  // the gap is smaller than the alignment of the latter range. The dynamic
  // loader places static TLS blocks this way not to waste space.
  uptr l = one;
  *align = ranges[l].align;
  while (l != 0 && ranges[l].begin < ranges[l - 1].end + ranges[l].align)
    *align = Max(*align, ranges[--l].align);
  uptr r = one + 1;
  while (r != len && ranges[r].begin < ranges[r - 1].end + ranges[r].align)
    *align = Max(*align, ranges[r++].align);
  *addr = ranges[l].begin;
  *size = ranges[r - 1].end - ranges[l].begin;
}
#endif  // (x86_64 || i386 || mips || ...) && (SANITIZER_FREEBSD ||
        // SANITIZER_LINUX) && !SANITIZER_ANDROID && !SANITIZER_GO

#if SANITIZER_NETBSD
// Returns this thread's TCB via the fastest lwp accessor available.
static struct tls_tcb *ThreadSelfTlsTcb() {
  struct tls_tcb *tcb = nullptr;
#ifdef __HAVE___LWP_GETTCB_FAST
  tcb = (struct tls_tcb *)__lwp_gettcb_fast();
#elif defined(__HAVE___LWP_GETPRIVATE_FAST)
  tcb = (struct tls_tcb *)__lwp_getprivate_fast();
#endif
  return tcb;
}

uptr ThreadSelf() { return (uptr)ThreadSelfTlsTcb()->tcb_pthread; }

// dl_iterate_phdr callback: stores the p_memsz of the main program's TLS
// block (dlpi_tls_modid == 1) into *(uptr *)data.
int GetSizeFromHdr(struct dl_phdr_info *info, size_t size, void *data) {
  const Elf_Phdr *hdr = info->dlpi_phdr;
  const Elf_Phdr *last_hdr = hdr + info->dlpi_phnum;

  for (; hdr != last_hdr; ++hdr) {
    if (hdr->p_type == PT_TLS && info->dlpi_tls_modid == 1) {
      *(uptr *)data = hdr->p_memsz;
      break;
    }
  }
  return 0;
}
#endif  // SANITIZER_NETBSD

#if SANITIZER_ANDROID
// Bionic provides this API since S.
479 extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_get_static_tls_bounds(void **, 480 void **); 481 #endif 482 483 #if !SANITIZER_GO 484 static void GetTls(uptr *addr, uptr *size) { 485 #if SANITIZER_ANDROID 486 if (&__libc_get_static_tls_bounds) { 487 void *start_addr; 488 void *end_addr; 489 __libc_get_static_tls_bounds(&start_addr, &end_addr); 490 *addr = reinterpret_cast<uptr>(start_addr); 491 *size = 492 reinterpret_cast<uptr>(end_addr) - reinterpret_cast<uptr>(start_addr); 493 } else { 494 *addr = 0; 495 *size = 0; 496 } 497 #elif SANITIZER_GLIBC && defined(__x86_64__) 498 // For aarch64 and x86-64, use an O(1) approach which requires relatively 499 // precise ThreadDescriptorSize. g_tls_size was initialized in InitTlsSize. 500 # if SANITIZER_X32 501 asm("mov %%fs:8,%0" : "=r"(*addr)); 502 # else 503 asm("mov %%fs:16,%0" : "=r"(*addr)); 504 # endif 505 *size = g_tls_size; 506 *addr -= *size; 507 *addr += ThreadDescriptorSize(); 508 #elif SANITIZER_GLIBC && defined(__aarch64__) 509 *addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) - 510 ThreadDescriptorSize(); 511 *size = g_tls_size + ThreadDescriptorSize(); 512 #elif SANITIZER_GLIBC && defined(__loongarch__) 513 # ifdef __clang__ 514 *addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) - 515 ThreadDescriptorSize(); 516 # else 517 asm("or %0,$tp,$zero" : "=r"(*addr)); 518 *addr -= ThreadDescriptorSize(); 519 # endif 520 *size = g_tls_size + ThreadDescriptorSize(); 521 #elif SANITIZER_GLIBC && defined(__powerpc64__) 522 // Workaround for glibc<2.25(?). 2.27 is known to not need this. 
523 uptr tp; 524 asm("addi %0,13,-0x7000" : "=r"(tp)); 525 const uptr pre_tcb_size = TlsPreTcbSize(); 526 *addr = tp - pre_tcb_size; 527 *size = g_tls_size + pre_tcb_size; 528 #elif SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS 529 uptr align; 530 GetStaticTlsBoundary(addr, size, &align); 531 #if defined(__x86_64__) || defined(__i386__) || defined(__s390__) || \ 532 defined(__sparc__) 533 if (SANITIZER_GLIBC) { 534 #if defined(__x86_64__) || defined(__i386__) 535 align = Max<uptr>(align, 64); 536 #else 537 align = Max<uptr>(align, 16); 538 #endif 539 } 540 const uptr tp = RoundUpTo(*addr + *size, align); 541 542 // lsan requires the range to additionally cover the static TLS surplus 543 // (elf/dl-tls.c defines 1664). Otherwise there may be false positives for 544 // allocations only referenced by tls in dynamically loaded modules. 545 if (SANITIZER_GLIBC) 546 *size += 1644; 547 else if (SANITIZER_FREEBSD) 548 *size += 128; // RTLD_STATIC_TLS_EXTRA 549 550 // Extend the range to include the thread control block. On glibc, lsan needs 551 // the range to include pthread::{specific_1stblock,specific} so that 552 // allocations only referenced by pthread_setspecific can be scanned. This may 553 // underestimate by at most TLS_TCB_ALIGN-1 bytes but it should be fine 554 // because the number of bytes after pthread::specific is larger. 555 *addr = tp - RoundUpTo(*size, align); 556 *size = tp - *addr + ThreadDescriptorSize(); 557 #else 558 if (SANITIZER_GLIBC) 559 *size += 1664; 560 else if (SANITIZER_FREEBSD) 561 *size += 128; // RTLD_STATIC_TLS_EXTRA 562 #if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64 563 const uptr pre_tcb_size = TlsPreTcbSize(); 564 *addr -= pre_tcb_size; 565 *size += pre_tcb_size; 566 #else 567 // arm and aarch64 reserve two words at TP, so this underestimates the range. 568 // However, this is sufficient for the purpose of finding the pointers to 569 // thread-specific data keys. 
570 const uptr tcb_size = ThreadDescriptorSize(); 571 *addr -= tcb_size; 572 *size += tcb_size; 573 #endif 574 #endif 575 #elif SANITIZER_NETBSD 576 struct tls_tcb * const tcb = ThreadSelfTlsTcb(); 577 *addr = 0; 578 *size = 0; 579 if (tcb != 0) { 580 // Find size (p_memsz) of dlpi_tls_modid 1 (TLS block of the main program). 581 // ld.elf_so hardcodes the index 1. 582 dl_iterate_phdr(GetSizeFromHdr, size); 583 584 if (*size != 0) { 585 // The block has been found and tcb_dtv[1] contains the base address 586 *addr = (uptr)tcb->tcb_dtv[1]; 587 } 588 } 589 #else 590 #error "Unknown OS" 591 #endif 592 } 593 #endif 594 595 #if !SANITIZER_GO 596 uptr GetTlsSize() { 597 #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \ 598 SANITIZER_SOLARIS 599 uptr addr, size; 600 GetTls(&addr, &size); 601 return size; 602 #else 603 return 0; 604 #endif 605 } 606 #endif 607 608 void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, 609 uptr *tls_addr, uptr *tls_size) { 610 #if SANITIZER_GO 611 // Stub implementation for Go. 612 *stk_addr = *stk_size = *tls_addr = *tls_size = 0; 613 #else 614 GetTls(tls_addr, tls_size); 615 616 uptr stack_top, stack_bottom; 617 GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom); 618 *stk_addr = stack_bottom; 619 *stk_size = stack_top - stack_bottom; 620 621 if (!main) { 622 // If stack and tls intersect, make them non-intersecting. 
623 if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) { 624 if (*stk_addr + *stk_size < *tls_addr + *tls_size) 625 *tls_size = *stk_addr + *stk_size - *tls_addr; 626 *stk_size = *tls_addr - *stk_addr; 627 } 628 } 629 #endif 630 } 631 632 #if !SANITIZER_FREEBSD 633 typedef ElfW(Phdr) Elf_Phdr; 634 #elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2 635 #define Elf_Phdr XElf32_Phdr 636 #define dl_phdr_info xdl_phdr_info 637 #define dl_iterate_phdr(c, b) xdl_iterate_phdr((c), (b)) 638 #endif // !SANITIZER_FREEBSD 639 640 struct DlIteratePhdrData { 641 InternalMmapVectorNoCtor<LoadedModule> *modules; 642 bool first; 643 }; 644 645 static int AddModuleSegments(const char *module_name, dl_phdr_info *info, 646 InternalMmapVectorNoCtor<LoadedModule> *modules) { 647 if (module_name[0] == '\0') 648 return 0; 649 LoadedModule cur_module; 650 cur_module.set(module_name, info->dlpi_addr); 651 for (int i = 0; i < (int)info->dlpi_phnum; i++) { 652 const Elf_Phdr *phdr = &info->dlpi_phdr[i]; 653 if (phdr->p_type == PT_LOAD) { 654 uptr cur_beg = info->dlpi_addr + phdr->p_vaddr; 655 uptr cur_end = cur_beg + phdr->p_memsz; 656 bool executable = phdr->p_flags & PF_X; 657 bool writable = phdr->p_flags & PF_W; 658 cur_module.addAddressRange(cur_beg, cur_end, executable, 659 writable); 660 } else if (phdr->p_type == PT_NOTE) { 661 # ifdef NT_GNU_BUILD_ID 662 uptr off = 0; 663 while (off + sizeof(ElfW(Nhdr)) < phdr->p_memsz) { 664 auto *nhdr = reinterpret_cast<const ElfW(Nhdr) *>(info->dlpi_addr + 665 phdr->p_vaddr + off); 666 constexpr auto kGnuNamesz = 4; // "GNU" with NUL-byte. 667 static_assert(kGnuNamesz % 4 == 0, "kGnuNameSize is aligned to 4."); 668 if (nhdr->n_type == NT_GNU_BUILD_ID && nhdr->n_namesz == kGnuNamesz) { 669 if (off + sizeof(ElfW(Nhdr)) + nhdr->n_namesz + nhdr->n_descsz > 670 phdr->p_memsz) { 671 // Something is very wrong, bail out instead of reading potentially 672 // arbitrary memory. 
673 break; 674 } 675 const char *name = 676 reinterpret_cast<const char *>(nhdr) + sizeof(*nhdr); 677 if (internal_memcmp(name, "GNU", 3) == 0) { 678 const char *value = reinterpret_cast<const char *>(nhdr) + 679 sizeof(*nhdr) + kGnuNamesz; 680 cur_module.setUuid(value, nhdr->n_descsz); 681 break; 682 } 683 } 684 off += sizeof(*nhdr) + RoundUpTo(nhdr->n_namesz, 4) + 685 RoundUpTo(nhdr->n_descsz, 4); 686 } 687 # endif 688 } 689 } 690 modules->push_back(cur_module); 691 return 0; 692 } 693 694 static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) { 695 DlIteratePhdrData *data = (DlIteratePhdrData *)arg; 696 if (data->first) { 697 InternalMmapVector<char> module_name(kMaxPathLength); 698 data->first = false; 699 // First module is the binary itself. 700 ReadBinaryNameCached(module_name.data(), module_name.size()); 701 return AddModuleSegments(module_name.data(), info, data->modules); 702 } 703 704 if (info->dlpi_name) { 705 InternalScopedString module_name; 706 module_name.append("%s", info->dlpi_name); 707 return AddModuleSegments(module_name.data(), info, data->modules); 708 } 709 710 return 0; 711 } 712 713 #if SANITIZER_ANDROID && __ANDROID_API__ < 21 714 extern "C" __attribute__((weak)) int dl_iterate_phdr( 715 int (*)(struct dl_phdr_info *, size_t, void *), void *); 716 #endif 717 718 static bool requiresProcmaps() { 719 #if SANITIZER_ANDROID && __ANDROID_API__ <= 22 720 // Fall back to /proc/maps if dl_iterate_phdr is unavailable or broken. 721 // The runtime check allows the same library to work with 722 // both K and L (and future) Android releases. 
723 return AndroidGetApiLevel() <= ANDROID_LOLLIPOP_MR1; 724 #else 725 return false; 726 #endif 727 } 728 729 static void procmapsInit(InternalMmapVectorNoCtor<LoadedModule> *modules) { 730 MemoryMappingLayout memory_mapping(/*cache_enabled*/true); 731 memory_mapping.DumpListOfModules(modules); 732 } 733 734 void ListOfModules::init() { 735 clearOrInit(); 736 if (requiresProcmaps()) { 737 procmapsInit(&modules_); 738 } else { 739 DlIteratePhdrData data = {&modules_, true}; 740 dl_iterate_phdr(dl_iterate_phdr_cb, &data); 741 } 742 } 743 744 // When a custom loader is used, dl_iterate_phdr may not contain the full 745 // list of modules. Allow callers to fall back to using procmaps. 746 void ListOfModules::fallbackInit() { 747 if (!requiresProcmaps()) { 748 clearOrInit(); 749 procmapsInit(&modules_); 750 } else { 751 clear(); 752 } 753 } 754 755 // getrusage does not give us the current RSS, only the max RSS. 756 // Still, this is better than nothing if /proc/self/statm is not available 757 // for some reason, e.g. due to a sandbox. 758 static uptr GetRSSFromGetrusage() { 759 struct rusage usage; 760 if (getrusage(RUSAGE_SELF, &usage)) // Failed, probably due to a sandbox. 761 return 0; 762 return usage.ru_maxrss << 10; // ru_maxrss is in Kb. 763 } 764 765 uptr GetRSS() { 766 if (!common_flags()->can_use_proc_maps_statm) 767 return GetRSSFromGetrusage(); 768 fd_t fd = OpenFile("/proc/self/statm", RdOnly); 769 if (fd == kInvalidFd) 770 return GetRSSFromGetrusage(); 771 char buf[64]; 772 uptr len = internal_read(fd, buf, sizeof(buf) - 1); 773 internal_close(fd); 774 if ((sptr)len <= 0) 775 return 0; 776 buf[len] = 0; 777 // The format of the file is: 778 // 1084 89 69 11 0 79 0 779 // We need the second number which is RSS in pages. 780 char *pos = buf; 781 // Skip the first number. 782 while (*pos >= '0' && *pos <= '9') 783 pos++; 784 // Skip whitespaces. 785 while (!(*pos >= '0' && *pos <= '9') && *pos != 0) 786 pos++; 787 // Read the number. 
788 uptr rss = 0; 789 while (*pos >= '0' && *pos <= '9') 790 rss = rss * 10 + *pos++ - '0'; 791 return rss * GetPageSizeCached(); 792 } 793 794 // sysconf(_SC_NPROCESSORS_{CONF,ONLN}) cannot be used on most platforms as 795 // they allocate memory. 796 u32 GetNumberOfCPUs() { 797 #if SANITIZER_FREEBSD || SANITIZER_NETBSD 798 u32 ncpu; 799 int req[2]; 800 uptr len = sizeof(ncpu); 801 req[0] = CTL_HW; 802 req[1] = HW_NCPU; 803 CHECK_EQ(internal_sysctl(req, 2, &ncpu, &len, NULL, 0), 0); 804 return ncpu; 805 #elif SANITIZER_ANDROID && !defined(CPU_COUNT) && !defined(__aarch64__) 806 // Fall back to /sys/devices/system/cpu on Android when cpu_set_t doesn't 807 // exist in sched.h. That is the case for toolchains generated with older 808 // NDKs. 809 // This code doesn't work on AArch64 because internal_getdents makes use of 810 // the 64bit getdents syscall, but cpu_set_t seems to always exist on AArch64. 811 uptr fd = internal_open("/sys/devices/system/cpu", O_RDONLY | O_DIRECTORY); 812 if (internal_iserror(fd)) 813 return 0; 814 InternalMmapVector<u8> buffer(4096); 815 uptr bytes_read = buffer.size(); 816 uptr n_cpus = 0; 817 u8 *d_type; 818 struct linux_dirent *entry = (struct linux_dirent *)&buffer[bytes_read]; 819 while (true) { 820 if ((u8 *)entry >= &buffer[bytes_read]) { 821 bytes_read = internal_getdents(fd, (struct linux_dirent *)buffer.data(), 822 buffer.size()); 823 if (internal_iserror(bytes_read) || !bytes_read) 824 break; 825 entry = (struct linux_dirent *)buffer.data(); 826 } 827 d_type = (u8 *)entry + entry->d_reclen - 1; 828 if (d_type >= &buffer[bytes_read] || 829 (u8 *)&entry->d_name[3] >= &buffer[bytes_read]) 830 break; 831 if (entry->d_ino != 0 && *d_type == DT_DIR) { 832 if (entry->d_name[0] == 'c' && entry->d_name[1] == 'p' && 833 entry->d_name[2] == 'u' && 834 entry->d_name[3] >= '0' && entry->d_name[3] <= '9') 835 n_cpus++; 836 } 837 entry = (struct linux_dirent *)(((u8 *)entry) + entry->d_reclen); 838 } 839 internal_close(fd); 840 return 
n_cpus; 841 #elif SANITIZER_SOLARIS 842 return sysconf(_SC_NPROCESSORS_ONLN); 843 #else 844 cpu_set_t CPUs; 845 CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0); 846 return CPU_COUNT(&CPUs); 847 #endif 848 } 849 850 #if SANITIZER_LINUX 851 852 #if SANITIZER_ANDROID 853 static atomic_uint8_t android_log_initialized; 854 855 void AndroidLogInit() { 856 openlog(GetProcessName(), 0, LOG_USER); 857 atomic_store(&android_log_initialized, 1, memory_order_release); 858 } 859 860 static bool ShouldLogAfterPrintf() { 861 return atomic_load(&android_log_initialized, memory_order_acquire); 862 } 863 864 extern "C" SANITIZER_WEAK_ATTRIBUTE 865 int async_safe_write_log(int pri, const char* tag, const char* msg); 866 extern "C" SANITIZER_WEAK_ATTRIBUTE 867 int __android_log_write(int prio, const char* tag, const char* msg); 868 869 // ANDROID_LOG_INFO is 4, but can't be resolved at runtime. 870 #define SANITIZER_ANDROID_LOG_INFO 4 871 872 // async_safe_write_log is a new public version of __libc_write_log that is 873 // used behind syslog. It is preferable to syslog as it will not do any dynamic 874 // memory allocation or formatting. 875 // If the function is not available, syslog is preferred for L+ (it was broken 876 // pre-L) as __android_log_write triggers a racey behavior with the strncpy 877 // interceptor. Fallback to __android_log_write pre-L. 
878 void WriteOneLineToSyslog(const char *s) { 879 if (&async_safe_write_log) { 880 async_safe_write_log(SANITIZER_ANDROID_LOG_INFO, GetProcessName(), s); 881 } else if (AndroidGetApiLevel() > ANDROID_KITKAT) { 882 syslog(LOG_INFO, "%s", s); 883 } else { 884 CHECK(&__android_log_write); 885 __android_log_write(SANITIZER_ANDROID_LOG_INFO, nullptr, s); 886 } 887 } 888 889 extern "C" SANITIZER_WEAK_ATTRIBUTE 890 void android_set_abort_message(const char *); 891 892 void SetAbortMessage(const char *str) { 893 if (&android_set_abort_message) 894 android_set_abort_message(str); 895 } 896 #else 897 void AndroidLogInit() {} 898 899 static bool ShouldLogAfterPrintf() { return true; } 900 901 void WriteOneLineToSyslog(const char *s) { syslog(LOG_INFO, "%s", s); } 902 903 void SetAbortMessage(const char *str) {} 904 #endif // SANITIZER_ANDROID 905 906 void LogMessageOnPrintf(const char *str) { 907 if (common_flags()->log_to_syslog && ShouldLogAfterPrintf()) 908 WriteToSyslog(str); 909 } 910 911 #endif // SANITIZER_LINUX 912 913 #if SANITIZER_GLIBC && !SANITIZER_GO 914 // glibc crashes when using clock_gettime from a preinit_array function as the 915 // vDSO function pointers haven't been initialized yet. __progname is 916 // initialized after the vDSO function pointers, so if it exists, is not null 917 // and is not empty, we can use clock_gettime. 918 extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname; 919 inline bool CanUseVDSO() { return &__progname && __progname && *__progname; } 920 921 // MonotonicNanoTime is a timing function that can leverage the vDSO by calling 922 // clock_gettime. real_clock_gettime only exists if clock_gettime is 923 // intercepted, so define it weakly and use it if available. 
extern "C" SANITIZER_WEAK_ATTRIBUTE
int real_clock_gettime(u32 clk_id, void *tp);
// Returns CLOCK_MONOTONIC time in nanoseconds, via the vDSO when it is safe
// to use (see CanUseVDSO above) and via the syscall wrapper otherwise.
u64 MonotonicNanoTime() {
  timespec ts;
  if (CanUseVDSO()) {
    if (&real_clock_gettime)
      real_clock_gettime(CLOCK_MONOTONIC, &ts);
    else
      clock_gettime(CLOCK_MONOTONIC, &ts);
  } else {
    internal_clock_gettime(CLOCK_MONOTONIC, &ts);
  }
  return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
#else
// Non-glibc & Go always use the regular function.
u64 MonotonicNanoTime() {
  timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
#endif  // SANITIZER_GLIBC && !SANITIZER_GO

// Re-executes the current binary with the original argv/environ. Dies (after
// printing errno) if execve fails.
void ReExec() {
  const char *pathname = "/proc/self/exe";

#if SANITIZER_FREEBSD
  for (const auto *aux = __elf_aux_vector; aux->a_type != AT_NULL; aux++) {
    if (aux->a_type == AT_EXECPATH) {
      pathname = static_cast<const char *>(aux->a_un.a_ptr);
      break;
    }
  }
#elif SANITIZER_NETBSD
  static const int name[] = {
      CTL_KERN,
      KERN_PROC_ARGS,
      -1,
      KERN_PROC_PATHNAME,
  };
  char path[400];
  uptr len;

  len = sizeof(path);
  if (internal_sysctl(name, ARRAY_SIZE(name), path, &len, NULL, 0) != -1)
    pathname = path;
#elif SANITIZER_SOLARIS
  pathname = getexecname();
  CHECK_NE(pathname, NULL);
#elif SANITIZER_USE_GETAUXVAL
  // Calling execve with /proc/self/exe sets that as $EXEC_ORIGIN. Binaries that
  // rely on that will fail to load shared libraries. Query AT_EXECFN instead.
  pathname = reinterpret_cast<const char *>(getauxval(AT_EXECFN));
#endif

  uptr rv = internal_execve(pathname, GetArgv(), GetEnviron());
  int rverrno;
  // execve only returns on failure.
  CHECK_EQ(internal_iserror(rv, &rverrno), true);
  Printf("execve failed, errno %d\n", rverrno);
  Die();
}

// Unmaps the address range [from, to); check-fails if munmap fails.
void UnmapFromTo(uptr from, uptr to) {
  if (to == from)
    return;
  CHECK(to >= from);
  uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
  if (UNLIKELY(internal_iserror(res))) {
    Report("ERROR: %s failed to unmap 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, to - from, to - from, (void *)from);
    CHECK("unable to unmap" && 0);
  }
}

// Reserves a shadow region of shadow_size_bytes aligned to
// max(granularity << shadow_scale, 1 << min_shadow_base_alignment) by
// over-mapping and trimming both ends. Returns the aligned shadow start.
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment,
                      UNUSED uptr &high_mem_end) {
  const uptr granularity = GetMmapGranularity();
  const uptr alignment =
      Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
  const uptr left_padding =
      Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);

  const uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity);
  const uptr map_size = shadow_size + left_padding + alignment;

  const uptr map_start = (uptr)MmapNoAccess(map_size);
  CHECK_NE(map_start, ~(uptr)0);

  const uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);

  // Release the surplus pages on both sides of the aligned shadow.
  UnmapFromTo(map_start, shadow_start - left_padding);
  UnmapFromTo(shadow_start + shadow_size, map_start + map_size);

  return shadow_start;
}

static uptr MmapSharedNoReserve(uptr addr, uptr size) {
  return internal_mmap(
      reinterpret_cast<void *>(addr), size, PROT_READ | PROT_WRITE,
      MAP_FIXED | MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
}

// Maps alias_addr so that it shares physical pages with base_addr
// (mremap with old_size == 0 creates an additional mapping of the same
// pages). Linux-only.
static uptr MremapCreateAlias(uptr base_addr, uptr alias_addr,
                              uptr alias_size) {
#if SANITIZER_LINUX
  return internal_mremap(reinterpret_cast<void *>(base_addr), 0, alias_size,
                         MREMAP_MAYMOVE | MREMAP_FIXED,
                         reinterpret_cast<void *>(alias_addr));
#else
  CHECK(false && "mremap is not supported outside of Linux");
  return 0;
#endif
}

// Creates num_aliases mappings of alias_size starting at start_addr, all
// backed by the same shared pages as the first one.
static void CreateAliases(uptr start_addr, uptr alias_size, uptr num_aliases) {
  uptr total_size = alias_size * num_aliases;
  uptr mapped = MmapSharedNoReserve(start_addr, total_size);
  CHECK_EQ(mapped, start_addr);

  for (uptr i = 1; i < num_aliases; ++i) {
    uptr alias_addr = start_addr + i * alias_size;
    CHECK_EQ(MremapCreateAlias(start_addr, alias_addr, alias_size), alias_addr);
  }
}

// Reserves the shadow + aliased regions used by HWASan-style aliasing mode.
// All size arguments must be powers of two (checked below).
uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
                                uptr num_aliases, uptr ring_buffer_size) {
  CHECK_EQ(alias_size & (alias_size - 1), 0);
  CHECK_EQ(num_aliases & (num_aliases - 1), 0);
  CHECK_EQ(ring_buffer_size & (ring_buffer_size - 1), 0);

  const uptr granularity = GetMmapGranularity();
  shadow_size = RoundUpTo(shadow_size, granularity);
  CHECK_EQ(shadow_size & (shadow_size - 1), 0);

  const uptr alias_region_size = alias_size * num_aliases;
  const uptr alignment =
      2 * Max(Max(shadow_size, alias_region_size), ring_buffer_size);
  const uptr left_padding = ring_buffer_size;

  const uptr right_size = alignment;
  const uptr map_size = left_padding + 2 * alignment;

  const uptr map_start = reinterpret_cast<uptr>(MmapNoAccess(map_size));
  CHECK_NE(map_start, static_cast<uptr>(-1));
  const uptr right_start = RoundUpTo(map_start + left_padding, alignment);

  UnmapFromTo(map_start, right_start - left_padding);
  UnmapFromTo(right_start + right_size, map_start + map_size);

  CreateAliases(right_start + right_size / 2, alias_size, num_aliases);

  return right_start;
}

void InitializePlatformCommonFlags(CommonFlags *cf) {
#if SANITIZER_ANDROID
  if
(&__libc_get_static_tls_bounds == nullptr) 1083 cf->detect_leaks = false; 1084 #endif 1085 } 1086 1087 } // namespace __sanitizer 1088 1089 #endif 1090