//===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
/// FreeBSD-specific code.
///
//===----------------------------------------------------------------------===//

15 #include "sanitizer_common/sanitizer_platform.h"
16 #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
17 
18 #  include <dlfcn.h>
19 #  include <elf.h>
20 #  include <errno.h>
21 #  include <link.h>
22 #  include <pthread.h>
23 #  include <signal.h>
24 #  include <stdio.h>
25 #  include <stdlib.h>
26 #  include <sys/prctl.h>
27 #  include <sys/resource.h>
28 #  include <sys/time.h>
29 #  include <unistd.h>
30 #  include <unwind.h>
31 
32 #  include "hwasan.h"
33 #  include "hwasan_dynamic_shadow.h"
34 #  include "hwasan_interface_internal.h"
35 #  include "hwasan_mapping.h"
36 #  include "hwasan_report.h"
37 #  include "hwasan_thread.h"
38 #  include "hwasan_thread_list.h"
39 #  include "sanitizer_common/sanitizer_common.h"
40 #  include "sanitizer_common/sanitizer_procmaps.h"
41 #  include "sanitizer_common/sanitizer_stackdepot.h"
42 
// Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
//
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=OFF
//   Not currently tested.
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=ON
//   Integration tests downstream exist.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=OFF
//   Tested with check-hwasan on x86_64-linux.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
//   Tested with check-hwasan on aarch64-linux-android.
#  if !SANITIZER_ANDROID
SANITIZER_INTERFACE_ATTRIBUTE
THREADLOCAL uptr __hwasan_tls;
#  endif

namespace __hwasan {

// With the zero shadow base we cannot actually map pages starting from 0.
// This constant is somewhat arbitrary.
constexpr uptr kZeroBaseShadowStart = 0;
constexpr uptr kZeroBaseMaxShadowStart = 1 << 18;

static void ProtectGap(uptr addr, uptr size) {
  __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
                          kZeroBaseMaxShadowStart);
}

uptr kLowMemStart;
uptr kLowMemEnd;
uptr kHighMemStart;
uptr kHighMemEnd;

static void PrintRange(uptr start, uptr end, const char *name) {
  Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
}

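// Prints the address space top-down, one PrintRange row per range, e.g.
// (values illustrative only):
//   || [<kHighMemStart>, <kHighMemEnd>] || HighMem ||
// with a ShadowGap row inserted wherever two neighboring ranges are not
// adjacent.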
static void PrintAddressSpaceLayout() {
  PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
  if (kHighShadowEnd + 1 < kHighMemStart)
    PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
  else
    CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
  PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
  if (kLowShadowEnd + 1 < kHighShadowStart)
    PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowShadowEnd + 1, kHighShadowStart);
  PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
  if (kLowMemEnd + 1 < kLowShadowStart)
    PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
  PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
  CHECK_EQ(0, kLowMemStart);
}

static uptr GetHighMemEnd() {
  // HighMem covers the upper part of the address space.
  uptr max_address = GetMaxUserVirtualAddress();
  // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
  // properly aligned:
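  // For example, assuming a 4 KiB mmap granularity and the default
  // kShadowScale of 4, the mask below is (4096 << 4) - 1 = 0xffff, so
  // max_address is rounded up to end just below a 64 KiB boundary.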
  max_address |= (GetMmapGranularity() << kShadowScale) - 1;
  return max_address;
}

static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
  // FIXME: Android should init flags before shadow.
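  // A fixed base can be requested through the fixed_shadow_base flag, e.g.
  //   HWASAN_OPTIONS=fixed_shadow_base=0x400000000000 ./app
  // (the address here is purely illustrative); otherwise the shadow is placed
  // dynamically below.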
  if (!SANITIZER_ANDROID && flags()->fixed_shadow_base != (uptr)-1) {
    __hwasan_shadow_memory_dynamic_address = flags()->fixed_shadow_base;
    uptr beg = __hwasan_shadow_memory_dynamic_address;
    uptr end = beg + shadow_size_bytes;
    if (!MemoryRangeIsAvailable(beg, end)) {
      Report(
          "FATAL: HWAddressSanitizer: Shadow range %p-%p is not available.\n",
          (void *)beg, (void *)end);
      DumpProcessMap();
      CHECK(MemoryRangeIsAvailable(beg, end));
    }
  } else {
    __hwasan_shadow_memory_dynamic_address =
        FindDynamicShadowStart(shadow_size_bytes);
  }
}

static void MaybeDieIfNoTaggingAbi(const char *message) {
  if (!flags()->fail_without_syscall_abi)
    return;
  Printf("FATAL: %s\n", message);
  Die();
}

#  define PR_SET_TAGGED_ADDR_CTRL 55
#  define PR_GET_TAGGED_ADDR_CTRL 56
#  define PR_TAGGED_ADDR_ENABLE (1UL << 0)
#  define ARCH_GET_UNTAG_MASK 0x4001
#  define ARCH_ENABLE_TAGGED_ADDR 0x4002
#  define ARCH_GET_MAX_TAG_BITS 0x4003

static bool CanUseTaggingAbi() {
#  if defined(__x86_64__)
  unsigned long num_bits = 0;
  // Check for x86 LAM support. This API is based on a currently unsubmitted
  // patch to the Linux kernel (as of August 2022) and is thus subject to
  // change. The patch is here:
  // https://lore.kernel.org/all/20220815041803.17954-1-kirill.shutemov@linux.intel.com/
  //
  // arch_prctl(ARCH_GET_MAX_TAG_BITS, &bits) returns the maximum number of tag
  // bits the user can request, or zero if LAM is not supported by the hardware.
  if (internal_iserror(internal_arch_prctl(ARCH_GET_MAX_TAG_BITS,
                                           reinterpret_cast<uptr>(&num_bits))))
    return false;
  // The platform must provide enough bits for HWASan tags.
  if (num_bits < kTagBits)
    return false;
  return true;
#  else
  // Check for ARM TBI support.
  return !internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
#  endif // __x86_64__
}

static bool EnableTaggingAbi() {
#  if defined(__x86_64__)
  // Enable x86 LAM tagging for the process.
  //
  // arch_prctl(ARCH_ENABLE_TAGGED_ADDR, bits) enables tagging if the number of
  // tag bits requested by the user does not exceed that provided by the system.
  // arch_prctl(ARCH_GET_UNTAG_MASK, &mask) returns the mask of significant
  // address bits. It is ~0ULL if either LAM is disabled for the process or LAM
  // is not supported by the hardware.
  if (internal_iserror(internal_arch_prctl(ARCH_ENABLE_TAGGED_ADDR, kTagBits)))
    return false;
  unsigned long mask = 0;
  // Make sure the tag bits are where we expect them to be.
  if (internal_iserror(internal_arch_prctl(ARCH_GET_UNTAG_MASK,
                                           reinterpret_cast<uptr>(&mask))))
    return false;
  // @mask has ones for non-tag bits, whereas @kAddressTagMask has ones for tag
  // bits. Therefore these masks must not overlap.
  if (mask & kAddressTagMask)
    return false;
  return true;
#  else
  // Enable ARM TBI tagging for the process. If for some reason tagging is not
  // supported, prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE) returns
  // -EINVAL.
  if (internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
                                      PR_TAGGED_ADDR_ENABLE, 0, 0, 0)))
    return false;
  // Ensure that TBI is enabled.
  if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) !=
      PR_TAGGED_ADDR_ENABLE)
    return false;
  return true;
#  endif // __x86_64__
}

void InitializeOsSupport() {
  // Check we're running on a kernel that can use the tagged address ABI.
  bool has_abi = CanUseTaggingAbi();

  if (!has_abi) {
#  if SANITIZER_ANDROID || defined(HWASAN_ALIASING_MODE)
    // Some older Android kernels have the tagged pointer ABI on
    // unconditionally, and hence don't have the tagged-addr prctl while still
    // allowing the ABI.
    // If targeting Android and the prctl is not around, we assume this is the
    // case.
    return;
#  else
    MaybeDieIfNoTaggingAbi(
        "HWAddressSanitizer requires a kernel with tagged address ABI.");
#  endif
  }

  if (EnableTaggingAbi())
    return;

#  if SANITIZER_ANDROID
  MaybeDieIfNoTaggingAbi(
      "HWAddressSanitizer failed to enable tagged address syscall ABI.\n"
      "Check the `sysctl abi.tagged_addr_disabled` configuration.");
#  else
  MaybeDieIfNoTaggingAbi(
      "HWAddressSanitizer failed to enable tagged address syscall ABI.\n");
#  endif
}

bool InitShadow() {
  // Define the entire memory range.
  kHighMemEnd = GetHighMemEnd();

  // Determine shadow memory base offset.
  InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd));

  // Place the low memory first.
  kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1;
  kLowMemStart = 0;

  // Define the low shadow based on the already placed low memory.
  kLowShadowEnd = MemToShadow(kLowMemEnd);
  kLowShadowStart = __hwasan_shadow_memory_dynamic_address;

  // High shadow takes whatever memory is left up there (making sure it is not
  // interfering with low memory in the fixed case).
  kHighShadowEnd = MemToShadow(kHighMemEnd);
  kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1;

  // High memory starts where allocated shadow allows.
  kHighMemStart = ShadowToMem(kHighShadowStart);

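  // The resulting picture, bottom to top (exact addresses depend on where the
  // dynamic shadow landed):
  //   [kLowMemStart,     kLowMemEnd]      application low memory
  //   [kLowShadowStart,  kLowShadowEnd]   shadow of low memory
  //   [kHighShadowStart, kHighShadowEnd]  shadow of high memory
  //   [kHighMemStart,    kHighMemEnd]     application high memory
  // with any gaps in between protected below.
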
  // Check the sanity of the defined memory ranges (there might be gaps).
  CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
  CHECK_GT(kHighMemStart, kHighShadowEnd);
  CHECK_GT(kHighShadowEnd, kHighShadowStart);
  CHECK_GT(kHighShadowStart, kLowMemEnd);
  CHECK_GT(kLowMemEnd, kLowMemStart);
  CHECK_GT(kLowShadowEnd, kLowShadowStart);
  CHECK_GT(kLowShadowStart, kLowMemEnd);

  // Reserve shadow memory.
  ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
  ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");

  // Protect all the gaps.
  ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
  if (kLowMemEnd + 1 < kLowShadowStart)
    ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
  if (kLowShadowEnd + 1 < kHighShadowStart)
    ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
  if (kHighShadowEnd + 1 < kHighMemStart)
    ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);

  if (Verbosity())
    PrintAddressSpaceLayout();

  return true;
}

void InitThreads() {
  CHECK(__hwasan_shadow_memory_dynamic_address);
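  // Per-thread state lives in the (1ULL << kShadowBaseAlignment) bytes
  // (4 GiB with the usual alignment of 32) directly below the shadow base,
  // separated from the shadow by one guard page.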
  uptr guard_page_size = GetMmapGranularity();
  uptr thread_space_start =
      __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment);
  uptr thread_space_end =
      __hwasan_shadow_memory_dynamic_address - guard_page_size;
  ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
                           "hwasan threads", /*madvise_shadow*/ false);
  ProtectGap(thread_space_end,
             __hwasan_shadow_memory_dynamic_address - thread_space_end);
  InitThreadList(thread_space_start, thread_space_end - thread_space_start);
  hwasanThreadList().CreateCurrentThread();
}

bool MemIsApp(uptr p) {
// Memory outside the alias range has non-zero tags.
#  if !defined(HWASAN_ALIASING_MODE)
  CHECK_EQ(GetTagFromPointer(p), 0);
#  endif

  return (p >= kHighMemStart && p <= kHighMemEnd) ||
         (p >= kLowMemStart && p <= kLowMemEnd);
}

void InstallAtExitHandler() { atexit(HwasanAtExit); }

// ---------------------- TSD ---------------- {{{1

#  if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

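// The TSD value stores the number of remaining pthread TSD destructor
// iterations: each destructor pass decrements it, and only the final pass
// calls __hwasan_thread_exit(). This keeps the thread state alive while
// destructors of other TSD keys may still run instrumented code.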
void HwasanTSDThreadInit() {
  if (tsd_key_inited)
    CHECK_EQ(0, pthread_setspecific(tsd_key,
                                    (void *)GetPthreadDestructorIterations()));
}

void HwasanTSDDtor(void *tsd) {
  uptr iterations = (uptr)tsd;
  if (iterations > 1) {
    CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1)));
    return;
  }
  __hwasan_thread_exit();
}

void HwasanTSDInit() {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
}
#  else
void HwasanTSDInit() {}
void HwasanTSDThreadInit() {}
#  endif

#  if SANITIZER_ANDROID
uptr *GetCurrentThreadLongPtr() { return (uptr *)get_android_tls_ptr(); }
#  else
uptr *GetCurrentThreadLongPtr() { return &__hwasan_tls; }
#  endif

#  if SANITIZER_ANDROID
void AndroidTestTlsSlot() {
  uptr kMagicValue = 0x010203040A0B0C0D;
  uptr *tls_ptr = GetCurrentThreadLongPtr();
  uptr old_value = *tls_ptr;
  *tls_ptr = kMagicValue;
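  // On incompatible Bionic versions dlerror() keeps its state in
  // TLS_SLOT_SANITIZER and would clobber the magic value we just wrote.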
  dlerror();
  if (*(uptr *)get_android_tls_ptr() != kMagicValue) {
    Printf(
        "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used "
        "for dlerror().\n");
    Die();
  }
  *tls_ptr = old_value;
}
#  else
void AndroidTestTlsSlot() {}
#  endif

static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
  // Access type is passed in a platform dependent way (see below) and encoded
  // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is set if the error
  // is recoverable. Valid values of Y are 0 to 4, which are interpreted as
  // log2(access_size), and 0xF, which means that access size is passed via a
  // platform dependent register (see below).
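  //
  // Example: a recoverable 8-byte store has X = 1 (store) | 2 (recoverable)
  // = 3 and Y = log2(8) = 3, i.e. 0xXY = 0x33. Per the encodings below, that
  // corresponds to BRK #0x933 on aarch64 and, on x86_64, to INT3 followed by
  // the bytes 0f 1f 40 73 (NOP DWORD ptr [EAX + 0x40 + 0x33]).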
#  if defined(__aarch64__)
  // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
  // access size is stored in X1 register. Access address is always in X0
  // register.
  uptr pc = (uptr)info->si_addr;
  const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
  if ((code & 0xff00) != 0x900)
    return AccessInfo{};  // Not ours.

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.regs[0];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not ours.
  const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;

#  elif defined(__x86_64__)
  // Access type is encoded in the instruction following INT3 as
  // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // RSI register. Access address is always in RDI register.
  uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
  uint8_t *nop = (uint8_t *)pc;
  if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
      *(nop + 3) < 0x40)
    return AccessInfo{};  // Not ours.
  const unsigned code = *(nop + 3);

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not ours.
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;

#  elif SANITIZER_RISCV64
  // Access type is encoded in the instruction following EBREAK as
  // ADDI x0, x0, [0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // X11 register. Access address is always in X10 register.
  uptr pc = (uptr)uc->uc_mcontext.__gregs[REG_PC];
  uint8_t byte1 = *((u8 *)(pc + 0));
  uint8_t byte2 = *((u8 *)(pc + 1));
  uint8_t byte3 = *((u8 *)(pc + 2));
  uint8_t byte4 = *((u8 *)(pc + 3));
  uint32_t ebreak = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  bool isFaultShort = false;
  bool isEbreak = (ebreak == 0x100073);
  bool isShortEbreak = false;
#    if defined(__riscv_compressed)
  isFaultShort = ((ebreak & 0x3) != 0x3);
  isShortEbreak = ((ebreak & 0xffff) == 0x9002);
#    endif
  // The faulting instruction is not an ebreak; not our case.
  if (!(isEbreak || isShortEbreak))
    return AccessInfo{};
  // Advance pc to point past the ebreak and reconstruct the following addi
  // instruction.
  pc += isFaultShort ? 2 : 4;
  byte1 = *((u8 *)(pc + 0));
  byte2 = *((u8 *)(pc + 1));
  byte3 = *((u8 *)(pc + 2));
  byte4 = *((u8 *)(pc + 3));
  // Reassemble the instruction from its little-endian bytes.
  uint32_t instr = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  // The access code is encoded in the immediate field, i.e. the top 12 bits
  // of the 32-bit ADDI instruction.
  const unsigned code = (instr >> 20) & 0xffff;
  const uptr addr = uc->uc_mcontext.__gregs[10];
  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not our case
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.__gregs[11] : 1U << size_log;

#  else
#    error Unsupported architecture
#  endif

  return AccessInfo{addr, size, is_store, !is_store, recover};
}

static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
  AccessInfo ai = GetAccessInfo(info, uc);
  if (!ai.is_store && !ai.is_load)
    return false;

  SignalContext sig{info, uc};
  HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);

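  // Resume past the trapping instruction so a recoverable check can continue.
  // On aarch64 the kernel leaves the PC at the BRK, so step over it manually;
  // on x86_64 RIP already points past the INT3, and the following NOP that
  // encodes the access info executes harmlessly.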
#  if defined(__aarch64__)
  uc->uc_mcontext.pc += 4;
#  elif defined(__x86_64__)
#  elif SANITIZER_RISCV64
  // pc points to the trapping EBREAK (4 bytes) or C.EBREAK (2 bytes); read it
  // to determine how far to advance.
  uint8_t *exception_source = (uint8_t *)(uc->uc_mcontext.__gregs[REG_PC]);
  uint8_t byte1 = (uint8_t)(*(exception_source + 0));
  uint8_t byte2 = (uint8_t)(*(exception_source + 1));
  uint8_t byte3 = (uint8_t)(*(exception_source + 2));
  uint8_t byte4 = (uint8_t)(*(exception_source + 3));
  uint32_t faulted = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  bool isFaultShort = false;
#    if defined(__riscv_compressed)
  isFaultShort = ((faulted & 0x3) != 0x3);
#    endif
  uc->uc_mcontext.__gregs[REG_PC] += isFaultShort ? 2 : 4;
#  else
#    error Unsupported architecture
#  endif
  return true;
}

static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

void HwasanOnDeadlySignal(int signo, void *info, void *context) {
  // Probably a tag mismatch.
  if (signo == SIGTRAP)
    if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t *)context))
      return;

  HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
}

void Thread::InitStackAndTls(const InitState *) {
  uptr tls_size;
  uptr stack_size;
  GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_,
                       &tls_size);
  stack_top_ = stack_bottom_ + stack_size;
  tls_end_ = tls_begin_ + tls_size;
}

uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
  CHECK(IsAligned(p, kShadowAlignment));
  CHECK(IsAligned(size, kShadowAlignment));
  uptr shadow_start = MemToShadow(p);
  uptr shadow_size = MemToShadowSize(size);

  uptr page_size = GetPageSizeCached();
  uptr page_start = RoundUpTo(shadow_start, page_size);
  uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
  uptr threshold = common_flags()->clear_shadow_mmap_threshold;
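  // When zero-tagging a large shadow range, memset only the partial pages at
  // the head and tail and release the whole pages in between back to the OS
  // instead of writing them; they read back as zeroes.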
  if (SANITIZER_LINUX &&
      UNLIKELY(page_end >= page_start + threshold && tag == 0)) {
    internal_memset((void *)shadow_start, tag, page_start - shadow_start);
    internal_memset((void *)page_end, tag,
                    shadow_start + shadow_size - page_end);
    // For an anonymous private mapping MADV_DONTNEED will return a zero page on
    // Linux.
    ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end);
  } else {
    internal_memset((void *)shadow_start, tag, shadow_size);
  }
  return AddTagToPointer(p, tag);
}

static void BeforeFork() {
  if (CAN_SANITIZE_LEAKS) {
    __lsan::LockGlobal();
  }
  // The `__lsan` locking functions are defined regardless of
  // `CAN_SANITIZE_LEAKS` and lock the state we need.
  __lsan::LockThreads();
  __lsan::LockAllocator();
  StackDepotLockBeforeFork();
}

static void AfterFork(bool fork_child) {
  StackDepotUnlockAfterFork(fork_child);
  // The `__lsan` unlocking functions are defined regardless of
  // `CAN_SANITIZE_LEAKS` and unlock the state we need.
  __lsan::UnlockAllocator();
  __lsan::UnlockThreads();
  if (CAN_SANITIZE_LEAKS) {
    __lsan::UnlockGlobal();
  }
}

void HwasanInstallAtForkHandler() {
  pthread_atfork(
      &BeforeFork, []() { AfterFork(/* fork_child= */ false); },
      []() { AfterFork(/* fork_child= */ true); });
}

void InstallAtExitCheckLeaks() {
  if (CAN_SANITIZE_LEAKS) {
    if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
      if (flags()->halt_on_error)
        Atexit(__lsan::DoLeakCheck);
      else
        Atexit(__lsan::DoRecoverableLeakCheckVoid);
    }
  }
}

}  // namespace __hwasan

using namespace __hwasan;

extern "C" void __hwasan_thread_enter() {
  hwasanThreadList().CreateCurrentThread()->EnsureRandomStateInited();
}

extern "C" void __hwasan_thread_exit() {
  Thread *t = GetCurrentThread();
  // Make sure that the signal handler cannot see a stale current thread
  // pointer.
  atomic_signal_fence(memory_order_seq_cst);
  if (t) {
    // Block async signals on the thread, as the handler can be instrumented.
    // After this point instrumented code can't access essential data from TLS
    // and will crash.
    // Bionic already calls __hwasan_thread_exit with blocked signals.
    if (SANITIZER_GLIBC)
      BlockSignals();
    hwasanThreadList().ReleaseThread(t);
  }
}

#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD