//===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
/// FreeBSD-specific code.
///
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

#include "hwasan.h"
#include "hwasan_dynamic_shadow.h"
#include "hwasan_interface_internal.h"
#include "hwasan_mapping.h"
#include "hwasan_report.h"
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"

#include <dlfcn.h>
#include <elf.h>
#include <link.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <unistd.h>
#include <unwind.h>
#include <sys/prctl.h>
#include <errno.h>

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_procmaps.h"

// Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
//
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=OFF
//   Not currently tested.
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=ON
//   Integration tests downstream exist.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=OFF
//   Tested with check-hwasan on x86_64-linux.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
//   Tested with check-hwasan on aarch64-linux-android.
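// Outside of Android, instrumented code and the runtime locate the current
// thread's state through this thread-local variable (on Android the dedicated
// sanitizer TLS slot is used instead; see GetCurrentThreadLongPtr below).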
#if !SANITIZER_ANDROID
SANITIZER_INTERFACE_ATTRIBUTE
THREADLOCAL uptr __hwasan_tls;
#endif

namespace __hwasan {

// With the zero shadow base we cannot actually map pages starting from 0.
// This constant is somewhat arbitrary.
constexpr uptr kZeroBaseShadowStart = 0;
constexpr uptr kZeroBaseMaxShadowStart = 1 << 18;

static void ProtectGap(uptr addr, uptr size) {
  __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
                          kZeroBaseMaxShadowStart);
}

uptr kLowMemStart;
uptr kLowMemEnd;
uptr kHighMemStart;
uptr kHighMemEnd;

static void PrintRange(uptr start, uptr end, const char *name) {
  Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
}

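// Prints the layout from high to low addresses, one
// "|| [start, end] || name ||" row per range: HighMem, HighShadow, LowShadow
// and LowMem, with any ShadowGap ranges in between.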
static void PrintAddressSpaceLayout() {
  PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
  if (kHighShadowEnd + 1 < kHighMemStart)
    PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
  else
    CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
  PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
  if (kLowShadowEnd + 1 < kHighShadowStart)
    PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowShadowEnd + 1, kHighShadowStart);
  PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
  if (kLowMemEnd + 1 < kLowShadowStart)
    PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
  PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
  CHECK_EQ(0, kLowMemStart);
}

static uptr GetHighMemEnd() {
  // HighMem covers the upper part of the address space.
  uptr max_address = GetMaxUserVirtualAddress();
  // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
  // properly aligned:
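  // (e.g. assuming a 4 KiB mmap granularity and kShadowScale == 4, this ORs
  // in 0xffff so that kHighMemEnd falls on a 64 KiB boundary minus one; both
  // values are platform dependent and only illustrative here).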
  max_address |= (GetMmapGranularity() << kShadowScale) - 1;
  return max_address;
}

static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
  __hwasan_shadow_memory_dynamic_address =
      FindDynamicShadowStart(shadow_size_bytes);
}

void InitializeOsSupport() {
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
  // Check we're running on a kernel that can use the tagged address ABI.
  int local_errno = 0;
  if (internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0),
                       &local_errno) &&
      local_errno == EINVAL) {
#  if SANITIZER_ANDROID || defined(HWASAN_ALIASING_MODE)
    // Some older Android kernels have the tagged pointer ABI enabled
    // unconditionally and hence lack the tagged-addr prctl while still
    // allowing the ABI. If we are targeting Android and the prctl is not
    // present, we assume this is the case.
    return;
#  else
    if (flags()->fail_without_syscall_abi) {
      Printf(
          "FATAL: "
          "HWAddressSanitizer requires a kernel with tagged address ABI.\n");
      Die();
    }
#  endif
  }

  // Turn on the tagged address ABI and verify that it took effect.
  if ((internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
                                       PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) ||
       !internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0))) {
#  if defined(__x86_64__) && !defined(HWASAN_ALIASING_MODE)
    // Try the new prctl API for Intel LAM.  The API is based on a currently
    // unsubmitted patch to the Linux kernel (as of May 2021) and is thus
    // subject to change.  Patch is here:
    // https://lore.kernel.org/linux-mm/20210205151631.43511-12-kirill.shutemov@linux.intel.com/
    int tag_bits = kTagBits;
    int tag_shift = kAddressTagShift;
    if (!internal_iserror(
            internal_prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE,
                           reinterpret_cast<unsigned long>(&tag_bits),
                           reinterpret_cast<unsigned long>(&tag_shift), 0))) {
      CHECK_EQ(tag_bits, kTagBits);
      CHECK_EQ(tag_shift, kAddressTagShift);
      return;
    }
#  endif  // defined(__x86_64__) && !defined(HWASAN_ALIASING_MODE)
    if (flags()->fail_without_syscall_abi) {
      Printf(
          "FATAL: HWAddressSanitizer failed to enable the tagged address "
          "syscall ABI.\nCheck the `sysctl abi.tagged_addr_disabled` "
          "configuration.\n");
      Die();
    }
  }
#undef PR_SET_TAGGED_ADDR_CTRL
#undef PR_GET_TAGGED_ADDR_CTRL
#undef PR_TAGGED_ADDR_ENABLE
}

bool InitShadow() {
  // Define the entire memory range.
  kHighMemEnd = GetHighMemEnd();

  // Determine shadow memory base offset.
  InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd));

  // Place the low memory first.
  kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1;
  kLowMemStart = 0;

  // Define the low shadow based on the already placed low memory.
  kLowShadowEnd = MemToShadow(kLowMemEnd);
  kLowShadowStart = __hwasan_shadow_memory_dynamic_address;

  // High shadow takes whatever memory is left up there (making sure it does
  // not interfere with low memory in the fixed case).
  kHighShadowEnd = MemToShadow(kHighMemEnd);
  kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1;

  // High memory starts where allocated shadow allows.
  kHighMemStart = ShadowToMem(kHighShadowStart);

  // Check the sanity of the defined memory ranges (there might be gaps).
  CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
  CHECK_GT(kHighMemStart, kHighShadowEnd);
  CHECK_GT(kHighShadowEnd, kHighShadowStart);
  CHECK_GT(kHighShadowStart, kLowMemEnd);
  CHECK_GT(kLowMemEnd, kLowMemStart);
  CHECK_GT(kLowShadowEnd, kLowShadowStart);
  CHECK_GT(kLowShadowStart, kLowMemEnd);

  if (Verbosity())
    PrintAddressSpaceLayout();

  // Reserve shadow memory.
  ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
  ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");

  // Protect all the gaps.
  ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
  if (kLowMemEnd + 1 < kLowShadowStart)
    ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
  if (kLowShadowEnd + 1 < kHighShadowStart)
    ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
  if (kHighShadowEnd + 1 < kHighMemStart)
    ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);

  return true;
}

void InitThreads() {
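  // Carve out a region for per-thread state directly below the shadow, with a
  // guard page (protected below via ProtectGap) separating the two.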
  CHECK(__hwasan_shadow_memory_dynamic_address);
  uptr guard_page_size = GetMmapGranularity();
  uptr thread_space_start =
      __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment);
  uptr thread_space_end =
      __hwasan_shadow_memory_dynamic_address - guard_page_size;
  ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
                           "hwasan threads", /*madvise_shadow*/ false);
  ProtectGap(thread_space_end,
             __hwasan_shadow_memory_dynamic_address - thread_space_end);
  InitThreadList(thread_space_start, thread_space_end - thread_space_start);
  hwasanThreadList().CreateCurrentThread();
}

bool MemIsApp(uptr p) {
// Memory outside the alias range has non-zero tags.
#  if !defined(HWASAN_ALIASING_MODE)
  CHECK(GetTagFromPointer(p) == 0);
#  endif

  return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd);
}

void InstallAtExitHandler() {
  atexit(HwasanAtExit);
}

// ---------------------- TSD ---------------- {{{1

extern "C" void __hwasan_thread_enter() {
  hwasanThreadList().CreateCurrentThread()->InitRandomState();
}

extern "C" void __hwasan_thread_exit() {
  Thread *t = GetCurrentThread();
  // Make sure that the signal handler cannot see a stale current thread
  // pointer.
  atomic_signal_fence(memory_order_seq_cst);
  if (t)
    hwasanThreadList().ReleaseThread(t);
}

#if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

void HwasanTSDThreadInit() {
  if (tsd_key_inited)
    CHECK_EQ(0, pthread_setspecific(tsd_key,
                                    (void *)GetPthreadDestructorIterations()));
}

void HwasanTSDDtor(void *tsd) {
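  // The TSD slot holds a countdown: re-arm the destructor until it reaches 1
  // so that the thread state stays usable while other TSD destructors run.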
  uptr iterations = (uptr)tsd;
  if (iterations > 1) {
    CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1)));
    return;
  }
  __hwasan_thread_exit();
}

void HwasanTSDInit() {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
}
#else
void HwasanTSDInit() {}
void HwasanTSDThreadInit() {}
#endif

#if SANITIZER_ANDROID
uptr *GetCurrentThreadLongPtr() {
  return (uptr *)get_android_tls_ptr();
}
#else
uptr *GetCurrentThreadLongPtr() {
  return &__hwasan_tls;
}
#endif

#if SANITIZER_ANDROID
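// Check that the libc does not use TLS_SLOT_SANITIZER for dlerror(): park a
// magic value in the slot, call dlerror(), and verify that the value
// survived.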
void AndroidTestTlsSlot() {
  uptr kMagicValue = 0x010203040A0B0C0D;
  uptr *tls_ptr = GetCurrentThreadLongPtr();
  uptr old_value = *tls_ptr;
  *tls_ptr = kMagicValue;
  dlerror();
  if (*(uptr *)get_android_tls_ptr() != kMagicValue) {
    Printf(
        "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used "
        "for dlerror().\n");
    Die();
  }
  *tls_ptr = old_value;
}
#else
void AndroidTestTlsSlot() {}
#endif

static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
  // Access type is passed in a platform-dependent way (see below) and encoded
  // as 0xXY, where X&1 is 1 for store and 0 for load, and X&2 is set if the
  // error is recoverable. Valid values of Y are 0 to 4, which are interpreted
  // as log2(access_size), and 0xF, which means that the access size is passed
  // via a platform-dependent register (see below).
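  // For example, 0x13 (X == 1, Y == 3) encodes a non-recoverable 8-byte
  // store, while 0x2F (X == 2, Y == 0xF) encodes a recoverable load whose
  // size is passed in a register.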
#if defined(__aarch64__)
  // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
  // access size is stored in X1 register. Access address is always in X0
  // register.
  uptr pc = (uptr)info->si_addr;
  const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
  if ((code & 0xff00) != 0x900)
    return AccessInfo{}; // Not ours.

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.regs[0];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{}; // Not ours.
  const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;

#elif defined(__x86_64__)
  // Access type is encoded in the instruction following INT3 as
  // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // RSI register. Access address is always in RDI register.
  uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
  uint8_t *nop = (uint8_t *)pc;
  if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
      *(nop + 3) < 0x40)
    return AccessInfo{}; // Not ours.
  const unsigned code = *(nop + 3);

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{}; // Not ours.
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;

#else
# error Unsupported architecture
#endif

  return AccessInfo{addr, size, is_store, !is_store, recover};
}

static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
  AccessInfo ai = GetAccessInfo(info, uc);
  if (!ai.is_store && !ai.is_load)
    return false;

  SignalContext sig{info, uc};
  HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);

#if defined(__aarch64__)
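  // Skip over the 4-byte BRK instruction so that execution can resume after
  // the failed check when the error is recoverable.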
  uc->uc_mcontext.pc += 4;
#elif defined(__x86_64__)
#else
# error Unsupported architecture
#endif
  return true;
}

static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

void HwasanOnDeadlySignal(int signo, void *info, void *context) {
  // Probably a tag mismatch.
  if (signo == SIGTRAP)
    if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t *)context))
      return;

  HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
}

void Thread::InitStackAndTls(const InitState *) {
  uptr tls_size;
  uptr stack_size;
  GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_,
                       &tls_size);
  stack_top_ = stack_bottom_ + stack_size;
  tls_end_ = tls_begin_ + tls_size;
}

uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
  CHECK(IsAligned(p, kShadowAlignment));
  CHECK(IsAligned(size, kShadowAlignment));
  uptr shadow_start = MemToShadow(p);
  uptr shadow_size = MemToShadowSize(size);

  uptr page_size = GetPageSizeCached();
  uptr page_start = RoundUpTo(shadow_start, page_size);
  uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
  uptr threshold = common_flags()->clear_shadow_mmap_threshold;
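  // When zero-tagging a large enough range, memset only the unaligned head
  // and tail of the shadow and release the page-aligned middle back to the
  // OS, which reads back as zeroes.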
  if (SANITIZER_LINUX &&
      UNLIKELY(page_end >= page_start + threshold && tag == 0)) {
    internal_memset((void *)shadow_start, tag, page_start - shadow_start);
    internal_memset((void *)page_end, tag,
                    shadow_start + shadow_size - page_end);
    // For an anonymous private mapping, MADV_DONTNEED makes the pages read
    // back as zeroes on Linux.
    ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end);
  } else {
    internal_memset((void *)shadow_start, tag, shadow_size);
  }
  return AddTagToPointer(p, tag);
}

} // namespace __hwasan

#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD