//===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
/// FreeBSD-specific code.
///
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

#include "hwasan.h"
#include "hwasan_dynamic_shadow.h"
#include "hwasan_interface_internal.h"
#include "hwasan_mapping.h"
#include "hwasan_report.h"
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"

#include <dlfcn.h>
#include <elf.h>
#include <link.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <unistd.h>
#include <unwind.h>
#include <sys/prctl.h>
#include <errno.h>

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_procmaps.h"

// Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
//
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=OFF
//   Not currently tested.
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=ON
//   Integration tests downstream exist.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=OFF
//   Tested with check-hwasan on x86_64-linux.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
//   Tested with check-hwasan on aarch64-linux-android.
#if !SANITIZER_ANDROID
SANITIZER_INTERFACE_ATTRIBUTE
THREADLOCAL uptr __hwasan_tls;
#endif

namespace __hwasan {

static void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
  CHECK_EQ((beg % GetMmapGranularity()), 0);
  CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
  uptr size = end - beg + 1;
  DecreaseTotalMmap(size);  // Don't count the shadow against mmap_limit_mb.
  if (!MmapFixedNoReserve(beg, size, name)) {
    Report(
        "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
        "Perhaps you're using ulimit -v\n",
        size);
    Abort();
  }
}

static void ProtectGap(uptr addr, uptr size) {
  if (!size)
    return;
  void *res = MmapFixedNoAccess(addr, size, "shadow gap");
  if (addr == (uptr)res)
    return;
  // A few pages at the start of the address space cannot be protected, but we
  // really want to protect as much as possible to prevent this memory from
  // being returned by a non-FIXED mmap().
  if (addr == 0) {
    uptr step = GetMmapGranularity();
    while (size > step) {
      addr += step;
      size -= step;
      void *res = MmapFixedNoAccess(addr, size, "shadow gap");
      if (addr == (uptr)res)
        return;
    }
  }

  Report(
      "ERROR: Failed to protect shadow gap [%p, %p]. "
      "HWASan cannot proceed correctly. ABORTING.\n", (void *)addr,
      (void *)(addr + size));
  DumpProcessMap();
  Die();
}

static uptr kLowMemStart;
static uptr kLowMemEnd;
static uptr kLowShadowEnd;
static uptr kLowShadowStart;
static uptr kHighShadowStart;
static uptr kHighShadowEnd;
static uptr kHighMemStart;
static uptr kHighMemEnd;

static void PrintRange(uptr start, uptr end, const char *name) {
  Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
}

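// When verbosity is enabled, InitShadow() uses the function below to dump the
// computed layout, one "|| [start, end] || name ||" row per range (HighMem,
// HighShadow, LowShadow, LowMem, plus any ShadowGap rows), highest addresses
// first, which makes a misplaced shadow region easy to spot in the log.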
static void PrintAddressSpaceLayout() {
  PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
  if (kHighShadowEnd + 1 < kHighMemStart)
    PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
  else
    CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
  PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
  if (kLowShadowEnd + 1 < kHighShadowStart)
    PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowShadowEnd + 1, kHighShadowStart);
  PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
  if (kLowMemEnd + 1 < kLowShadowStart)
    PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
  PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
  CHECK_EQ(0, kLowMemStart);
}

static uptr GetHighMemEnd() {
  // HighMem covers the upper part of the address space.
  uptr max_address = GetMaxUserVirtualAddress();
  // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
  // properly aligned:
  max_address |= (GetMmapGranularity() << kShadowScale) - 1;
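  // For example, with a 4 KiB mmap granularity and kShadowScale == 4, this
  // ORs in 0xffff, so the returned address always ends on a 64 KiB boundary
  // minus one, keeping both kHighMemEnd and the derived kHighMemStart aligned
  // as required above.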
  return max_address;
}

static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
  __hwasan_shadow_memory_dynamic_address =
      FindDynamicShadowStart(shadow_size_bytes);
}

void InitPrctl() {
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
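  // These values mirror the Linux UAPI constants from <linux/prctl.h>; they
  // are defined locally so the file still builds against older kernel headers
  // that predate the tagged address ABI.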
  // Check we're running on a kernel that can use the tagged address ABI.
  if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) == (uptr)-1 &&
      errno == EINVAL) {
#if SANITIZER_ANDROID
    // Some older Android kernels have the tagged pointer ABI enabled
    // unconditionally, and hence lack the tagged-addr prctl even though they
    // still allow the ABI. If we are targeting Android and the prctl is not
    // present, assume this is the case.
    return;
#else
    Printf(
        "FATAL: "
        "HWAddressSanitizer requires a kernel with tagged address ABI.\n");
    Die();
#endif
  }

  // Turn on the tagged address ABI.
  if (internal_prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) ==
          (uptr)-1 ||
      !internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0)) {
    Printf(
        "FATAL: HWAddressSanitizer failed to enable tagged address syscall "
        "ABI.\nCheck the `sysctl abi.tagged_addr_disabled` configuration.\n");
    Die();
  }
#undef PR_SET_TAGGED_ADDR_CTRL
#undef PR_GET_TAGGED_ADDR_CTRL
#undef PR_TAGGED_ADDR_ENABLE
}

bool InitShadow() {
  // Define the entire memory range.
  kHighMemEnd = GetHighMemEnd();

  // Determine shadow memory base offset.
  InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd));

  // Place the low memory first.
  kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1;
  kLowMemStart = 0;

  // Define the low shadow based on the already placed low memory.
  kLowShadowEnd = MemToShadow(kLowMemEnd);
  kLowShadowStart = __hwasan_shadow_memory_dynamic_address;
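  // Rough sketch of the mapping (see hwasan_mapping.h for the real
  // definition): MemToShadow(p) is essentially
  // (p >> kShadowScale) + __hwasan_shadow_memory_dynamic_address, so one
  // shadow byte describes each 2^kShadowScale-byte granule of memory.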

  // High shadow takes whatever memory is left up there (making sure it is not
  // interfering with low memory in the fixed case).
  kHighShadowEnd = MemToShadow(kHighMemEnd);
  kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1;

  // High memory starts where allocated shadow allows.
  kHighMemStart = ShadowToMem(kHighShadowStart);

  // Check the sanity of the defined memory ranges (there might be gaps).
  CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
  CHECK_GT(kHighMemStart, kHighShadowEnd);
  CHECK_GT(kHighShadowEnd, kHighShadowStart);
  CHECK_GT(kHighShadowStart, kLowMemEnd);
  CHECK_GT(kLowMemEnd, kLowMemStart);
  CHECK_GT(kLowShadowEnd, kLowShadowStart);
  CHECK_GT(kLowShadowStart, kLowMemEnd);

  if (Verbosity())
    PrintAddressSpaceLayout();

  // Reserve shadow memory.
  ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
  ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");

  // Protect all the gaps.
  ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
  if (kLowMemEnd + 1 < kLowShadowStart)
    ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
  if (kLowShadowEnd + 1 < kHighShadowStart)
    ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
  if (kHighShadowEnd + 1 < kHighMemStart)
    ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);

  return true;
}

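// Carve out a region for per-thread state directly below the shadow base,
// separated from it by a single guard page (protected below). The thread list
// allocator (see hwasan_thread_list.h) hands out thread objects and their
// stack ring buffers from this region.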
void InitThreads() {
  CHECK(__hwasan_shadow_memory_dynamic_address);
  uptr guard_page_size = GetMmapGranularity();
  uptr thread_space_start =
      __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment);
  uptr thread_space_end =
      __hwasan_shadow_memory_dynamic_address - guard_page_size;
  ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
                           "hwasan threads");
  ProtectGap(thread_space_end,
             __hwasan_shadow_memory_dynamic_address - thread_space_end);
  InitThreadList(thread_space_start, thread_space_end - thread_space_start);
}

static void MadviseShadowRegion(uptr beg, uptr end) {
  uptr size = end - beg + 1;
  SetShadowRegionHugePageMode(beg, size);
  if (common_flags()->use_madv_dontdump)
    DontDumpShadowMemory(beg, size);
}

void MadviseShadow() {
  MadviseShadowRegion(kLowShadowStart, kLowShadowEnd);
  MadviseShadowRegion(kHighShadowStart, kHighShadowEnd);
}

bool MemIsApp(uptr p) {
  CHECK(GetTagFromPointer(p) == 0);
  return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd);
}

static void HwasanAtExit(void) {
  if (common_flags()->print_module_map)
    DumpProcessMap();
  if (flags()->print_stats && (flags()->atexit || hwasan_report_count > 0))
    ReportStats();
  if (hwasan_report_count > 0) {
    // ReportAtExitStatistics();
    if (common_flags()->exitcode)
      internal__exit(common_flags()->exitcode);
  }
}

void InstallAtExitHandler() {
  atexit(HwasanAtExit);
}

// ---------------------- TSD ---------------- {{{1

extern "C" void __hwasan_thread_enter() {
  hwasanThreadList().CreateCurrentThread()->InitRandomState();
}

extern "C" void __hwasan_thread_exit() {
  Thread *t = GetCurrentThread();
  // Make sure the signal handler cannot see a stale current thread pointer.
  atomic_signal_fence(memory_order_seq_cst);
  if (t)
    hwasanThreadList().ReleaseThread(t);
}

#if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

void HwasanTSDThreadInit() {
  if (tsd_key_inited)
    CHECK_EQ(0, pthread_setspecific(tsd_key,
                                    (void *)GetPthreadDestructorIterations()));
}

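// POSIX may invoke TSD destructors up to PTHREAD_DESTRUCTOR_ITERATIONS times
// while the slot value stays non-null. HwasanTSDThreadInit() arms the slot
// with that iteration count, and the destructor below re-arms it with a
// decremented count, so __hwasan_thread_exit() only runs on the final pass,
// after other keys' destructors have had a chance to run.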
void HwasanTSDDtor(void *tsd) {
  uptr iterations = (uptr)tsd;
  if (iterations > 1) {
    CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1)));
    return;
  }
  __hwasan_thread_exit();
}

void HwasanTSDInit() {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
}
#else
void HwasanTSDInit() {}
void HwasanTSDThreadInit() {}
#endif

#if SANITIZER_ANDROID
uptr *GetCurrentThreadLongPtr() {
  return (uptr *)get_android_tls_ptr();
}
#else
uptr *GetCurrentThreadLongPtr() {
  return &__hwasan_tls;
}
#endif

#if SANITIZER_ANDROID
void AndroidTestTlsSlot() {
  uptr kMagicValue = 0x010203040A0B0C0D;
  uptr *tls_ptr = GetCurrentThreadLongPtr();
  uptr old_value = *tls_ptr;
  *tls_ptr = kMagicValue;
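  // dlerror() serves as the canary: on incompatible Android builds its
  // implementation keeps state in TLS_SLOT_SANITIZER(6) and would overwrite
  // the magic value planted above.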
  dlerror();
  if (*(uptr *)get_android_tls_ptr() != kMagicValue) {
    Printf(
        "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used "
        "for dlerror().\n");
    Die();
  }
  *tls_ptr = old_value;
}
#else
void AndroidTestTlsSlot() {}
#endif

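// The TLS slot does not hold a Thread * directly: it serves as the current
// position of the thread's StackAllocationsRingBuffer, and the owning Thread
// is looked up from the ring buffer's address. A zero slot means HWASan has
// not set this thread up yet.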
Thread *GetCurrentThread() {
  uptr *ThreadLongPtr = GetCurrentThreadLongPtr();
  if (UNLIKELY(*ThreadLongPtr == 0))
    return nullptr;
  auto *R = (StackAllocationsRingBuffer *)ThreadLongPtr;
  return hwasanThreadList().GetThreadByBufferAddress((uptr)R->Next());
}

struct AccessInfo {
  uptr addr;
  uptr size;
  bool is_store;
  bool is_load;
  bool recover;
};

static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
  // Access type is passed in a platform dependent way (see below) and encoded
  // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is
  // recoverable. Valid values of Y are 0 to 4, which are interpreted as
  // log2(access_size), and 0xF, which means that access size is passed via
  // platform dependent register (see below).
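  // Worked example: an encoded byte of 0x13 has bit 4 (0x10) set and bit 5
  // (0x20) clear, so it describes a non-recoverable store, and Y == 3 gives
  // an access size of 1 << 3 = 8 bytes; 0x2f would be a recoverable load
  // whose size must be fetched from the register named below.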
#if defined(__aarch64__)
  // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
  // access size is stored in X1 register. Access address is always in X0
  // register.
  uptr pc = (uptr)info->si_addr;
  const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
  if ((code & 0xff00) != 0x900)
    return AccessInfo{}; // Not ours.

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.regs[0];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{}; // Not ours.
  const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;

#elif defined(__x86_64__)
  // Access type is encoded in the instruction following INT3 as
  // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // RSI register. Access address is always in RDI register.
  uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
  uint8_t *nop = (uint8_t*)pc;
  if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
      *(nop + 3) < 0x40)
    return AccessInfo{}; // Not ours.
  const unsigned code = *(nop + 3);

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{}; // Not ours.
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;
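  // For instance, the byte sequence 0f 1f 40 53 encodes
  // "nop dword ptr [rax + 0x53]": displacement 0x40 + 0x13, i.e. the
  // non-recoverable 8-byte store from the 0x13 example above.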

#else
# error Unsupported architecture
#endif

  return AccessInfo{addr, size, is_store, !is_store, recover};
}

static void HandleTagMismatch(AccessInfo ai, uptr pc, uptr frame,
                              ucontext_t *uc, uptr *registers_frame = nullptr) {
  InternalMmapVector<BufferedStackTrace> stack_buffer(1);
  BufferedStackTrace *stack = stack_buffer.data();
  stack->Reset();
  stack->Unwind(pc, frame, uc, common_flags()->fast_unwind_on_fatal);

  // The second stack frame contains the failure __hwasan_check function, as
  // we have a stack frame for the registers saved in __hwasan_tag_mismatch that
  // we wish to ignore. This (currently) only occurs on AArch64, as x64
  // implementations use SIGTRAP to implement the failure, and thus do not go
  // through the stack saver.
  if (registers_frame && stack->trace && stack->size > 0) {
    stack->trace++;
    stack->size--;
  }

  bool fatal = flags()->halt_on_error || !ai.recover;
  ReportTagMismatch(stack, ai.addr, ai.size, ai.is_store, fatal,
                    registers_frame);
}

static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
  AccessInfo ai = GetAccessInfo(info, uc);
  if (!ai.is_store && !ai.is_load)
    return false;

  SignalContext sig{info, uc};
  HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);

#if defined(__aarch64__)
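  // BRK does not advance the PC, so step over the 4-byte instruction to let
  // execution resume in the recoverable case. x86_64 needs nothing here:
  // INT3 already leaves RIP past the trap, and the encoding NOP that follows
  // executes harmlessly.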
  uc->uc_mcontext.pc += 4;
#elif defined(__x86_64__)
#else
# error Unsupported architecture
#endif
  return true;
}

static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

void HwasanOnDeadlySignal(int signo, void *info, void *context) {
  // Probably a tag mismatch.
  if (signo == SIGTRAP)
    if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t*)context))
      return;

  HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
}

} // namespace __hwasan

// Entry point for interoperability between __hwasan_tag_mismatch (ASM) and the
// rest of the mismatch handling code (C++).
void __hwasan_tag_mismatch4(uptr addr, uptr access_info, uptr *registers_frame,
                            size_t outsize) {
  __hwasan::AccessInfo ai;
  ai.is_store = access_info & 0x10;
  ai.is_load = !ai.is_store;
  ai.recover = access_info & 0x20;
  ai.addr = addr;
  if ((access_info & 0xf) == 0xf)
    ai.size = outsize;
  else
    ai.size = 1 << (access_info & 0xf);
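  // For example, access_info == 0x2f decodes to a recoverable load whose size
  // arrives in outsize; 0x13 decodes to a non-recoverable 8-byte store.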

  __hwasan::HandleTagMismatch(ai, (uptr)__builtin_return_address(0),
                              (uptr)__builtin_frame_address(0), nullptr,
                              registers_frame);
  __builtin_unreachable();
}

#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD