xref: /freebsd/contrib/llvm-project/compiler-rt/lib/msan/msan_linux.cpp (revision 0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583)
//===-- msan_linux.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// Linux-, NetBSD- and FreeBSD-specific code.
//===----------------------------------------------------------------------===//

14 #include "sanitizer_common/sanitizer_platform.h"
15 #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
16 
17 #  include <elf.h>
18 #  include <link.h>
19 #  include <pthread.h>
20 #  include <signal.h>
21 #  include <stdio.h>
22 #  include <stdlib.h>
23 #  if SANITIZER_LINUX
24 #    include <sys/personality.h>
25 #  endif
26 #  include <sys/resource.h>
27 #  include <sys/time.h>
28 #  include <unistd.h>
29 #  include <unwind.h>
30 
31 #  include "msan.h"
32 #  include "msan_allocator.h"
33 #  include "msan_chained_origin_depot.h"
34 #  include "msan_report.h"
35 #  include "msan_thread.h"
36 #  include "sanitizer_common/sanitizer_common.h"
37 #  include "sanitizer_common/sanitizer_procmaps.h"
38 #  include "sanitizer_common/sanitizer_stackdepot.h"
39 
40 namespace __msan {
41 
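// Report a mapped range [beg, beg+size-1] at verbosity level 1. Empty ranges
// are skipped.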
void ReportMapRange(const char *descr, uptr beg, uptr size) {
  if (size > 0) {
    uptr end = beg + size - 1;
    VPrintf(1, "%s : %p-%p\n", descr, (void *)beg, (void *)end);
  }
}

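// Return true if [beg, beg+size-1] is free (or size is 0); otherwise return
// false, printing a fatal error message if 'verbose' is set.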
static bool CheckMemoryRangeAvailability(uptr beg, uptr size, bool verbose) {
  if (size > 0) {
    uptr end = beg + size - 1;
    if (!MemoryRangeIsAvailable(beg, end)) {
      if (verbose)
        Printf("FATAL: MemorySanitizer: Shadow range %p-%p is not available.\n",
               (void *)beg, (void *)end);
      return false;
    }
  }
  return true;
}

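// Reserve [beg, beg+size-1] as an inaccessible mapping. If the range starts
// at address zero and the kernel forbids mapping it (e.g. mmap_min_addr),
// retry without the first 16 pages.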
static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) {
  if (size > 0) {
    void *addr = MmapFixedNoAccess(beg, size, name);
    if (beg == 0 && addr) {
      // Depending on the kernel configuration, we may not be able to protect
      // the page at address zero.
      uptr gap = 16 * GetPageSizeCached();
      beg += gap;
      size -= gap;
      addr = MmapFixedNoAccess(beg, size, name);
    }
    if ((uptr)addr != beg) {
      uptr end = beg + size - 1;
      Printf(
          "FATAL: MemorySanitizer: Cannot protect memory range %p-%p (%s).\n",
          (void *)beg, (void *)end, name);
      return false;
    }
  }
  return true;
}

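// Sanity-check kMemoryLayout: segments must be sorted, contiguous and
// correctly typed, and for app/allocator segments the shadow/origin address
// transforms must be self-consistent at the start, middle and end of the
// segment.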
static void CheckMemoryLayoutSanity() {
  uptr prev_end = 0;
  for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
    uptr start = kMemoryLayout[i].start;
    uptr end = kMemoryLayout[i].end;
    MappingDesc::Type type = kMemoryLayout[i].type;
    CHECK_LT(start, end);
    CHECK_EQ(prev_end, start);
    CHECK(addr_is_type(start, type));
    CHECK(addr_is_type((start + end) / 2, type));
    CHECK(addr_is_type(end - 1, type));
    if (type == MappingDesc::APP || type == MappingDesc::ALLOCATOR) {
      uptr addr = start;
      CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
      CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
      CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));

      addr = (start + end) / 2;
      CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
      CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
      CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));

      addr = end - 1;
      CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
      CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
      CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
    }
    prev_end = end;
  }
}

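// Map shadow (and optionally origin) segments and protect invalid ones. In
// dry-run mode nothing is mapped or protected; only check that the required
// address ranges are still available.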
static bool InitShadow(bool init_origins, bool dry_run) {
  // Let the user know the mapping parameters first.
  VPrintf(1, "__msan_init %p\n", reinterpret_cast<void *>(&__msan_init));
  for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
    VPrintf(1, "%s: %zx - %zx\n", kMemoryLayout[i].name, kMemoryLayout[i].start,
            kMemoryLayout[i].end - 1);

  CheckMemoryLayoutSanity();

  if (!MEM_IS_APP(&__msan_init)) {
    if (!dry_run)
      Printf("FATAL: Code %p is out of application range. Non-PIE build?\n",
             reinterpret_cast<void *>(&__msan_init));
    return false;
  }

  const uptr maxVirtualAddress = GetMaxUserVirtualAddress();

  for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
    uptr start = kMemoryLayout[i].start;
    uptr end = kMemoryLayout[i].end;
    uptr size = end - start;
    MappingDesc::Type type = kMemoryLayout[i].type;

    // Skip segments that lie entirely above the maximal user virtual address
    // on this platform.
    if (start >= maxVirtualAddress)
      continue;

    bool map = type == MappingDesc::SHADOW ||
               (init_origins && type == MappingDesc::ORIGIN);
    bool protect = type == MappingDesc::INVALID ||
                   (!init_origins && type == MappingDesc::ORIGIN);
    CHECK(!(map && protect));
    if (!map && !protect) {
      CHECK(type == MappingDesc::APP || type == MappingDesc::ALLOCATOR);

      if (dry_run && type == MappingDesc::ALLOCATOR &&
          !CheckMemoryRangeAvailability(start, size, !dry_run))
        return false;
    }
    if (map) {
      if (dry_run && !CheckMemoryRangeAvailability(start, size, !dry_run))
        return false;
      if (!dry_run &&
          !MmapFixedSuperNoReserve(start, size, kMemoryLayout[i].name))
        return false;
      if (!dry_run && common_flags()->use_madv_dontdump)
        DontDumpShadowMemory(start, size);
    }
    if (protect) {
      if (dry_run && !CheckMemoryRangeAvailability(start, size, !dry_run))
        return false;
      if (!dry_run && !ProtectMemoryRange(start, size, kMemoryLayout[i].name))
        return false;
    }
  }

  return true;
}

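// Initialize the shadow mappings, re-exec'ing once with ASLR disabled (Linux
// only) if the current address-space layout is incompatible.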
bool InitShadowWithReExec(bool init_origins) {
  // Start with a dry run: check that the layout is ok, but don't print
  // warnings, because warning messages would cause tests to fail (even if we
  // successfully re-exec after the warning).
  bool success = InitShadow(init_origins, true);
  if (!success) {
#  if SANITIZER_LINUX
    // Perhaps ASLR entropy is too high. If ASLR is enabled, re-exec without it.
    int old_personality = personality(0xffffffff);
    bool aslr_on =
        (old_personality != -1) && ((old_personality & ADDR_NO_RANDOMIZE) == 0);

    if (aslr_on) {
      VReport(1,
              "WARNING: MemorySanitizer: memory layout is incompatible, "
              "possibly due to high-entropy ASLR.\n"
              "Re-execing with fixed virtual address space.\n"
              "N.B. reducing ASLR entropy is preferable.\n");
      CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
      ReExec();
    }
#  endif
  }

  // The earlier dry run didn't actually map or protect anything. Run again in
  // non-dry-run mode.
  return success && InitShadow(init_origins, false);
}

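// atexit handler: print statistics if requested and force the configured exit
// code when any reports were issued.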
static void MsanAtExit(void) {
  if (flags()->print_stats && (flags()->atexit || msan_report_count > 0))
    ReportStats();
  if (msan_report_count > 0) {
    ReportAtExitStatistics();
    if (common_flags()->exitcode)
      internal__exit(common_flags()->exitcode);
  }
}

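// Register the MSan atexit hook.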
void InstallAtExitHandler() {
  atexit(MsanAtExit);
}

// ---------------------- TSD ---------------- {{{1

#  if SANITIZER_NETBSD
// Thread-specific data cannot be used during early init on NetBSD. Keep the
// MSan TSD API for compatibility with existing code, but back it with an
// alternative implementation based on a thread_local variable.

static void (*tsd_destructor)(void *tsd) = nullptr;

struct tsd_key {
  tsd_key() : key(nullptr) {}
  ~tsd_key() {
    CHECK(tsd_destructor);
    if (key)
      (*tsd_destructor)(key);
  }
  MsanThread *key;
};

static thread_local struct tsd_key key;

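// Record the destructor that ~tsd_key will invoke for the thread's MsanThread
// at thread exit.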
void MsanTSDInit(void (*destructor)(void *tsd)) {
  CHECK(!tsd_destructor);
  tsd_destructor = destructor;
}

MsanThread *GetCurrentThread() {
  CHECK(tsd_destructor);
  return key.key;
}

void SetCurrentThread(MsanThread *tsd) {
  CHECK(tsd_destructor);
  CHECK(tsd);
  CHECK(!key.key);
  key.key = tsd;
}

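// Invoked at thread exit via the destructor registered with MsanTSDInit:
// clear the current thread pointer before tearing down the thread state.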
void MsanTSDDtor(void *tsd) {
  CHECK(tsd_destructor);
  CHECK_EQ(key.key, tsd);
  key.key = nullptr;
  // Make sure that a signal handler cannot see a stale current thread pointer.
  atomic_signal_fence(memory_order_seq_cst);
  MsanThread::TSDDtor(tsd);
}
#  else
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

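// Create the pthread TSD key whose destructor tears down the thread's
// MsanThread.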
void MsanTSDInit(void (*destructor)(void *tsd)) {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
}

static THREADLOCAL MsanThread *msan_current_thread;

MsanThread *GetCurrentThread() {
  return msan_current_thread;
}

void SetCurrentThread(MsanThread *t) {
  // Make sure we do not reset the current MsanThread.
  CHECK_EQ(0, msan_current_thread);
  msan_current_thread = t;
  // Make sure that MsanTSDDtor gets called at the end.
  CHECK(tsd_key_inited);
  pthread_setspecific(tsd_key, (void *)t);
}

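// pthread TSD destructor. Re-arms itself while other TSD destructors may
// still run, then clears the current thread pointer and tears down the
// thread state.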
void MsanTSDDtor(void *tsd) {
  MsanThread *t = (MsanThread *)tsd;
  if (t->destructor_iterations_ > 1) {
    t->destructor_iterations_--;
    CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
    return;
  }
  ScopedBlockSignals block(nullptr);
  msan_current_thread = nullptr;
  // Make sure that a signal handler cannot see a stale current thread pointer.
  atomic_signal_fence(memory_order_seq_cst);
  MsanThread::TSDDtor(tsd);
}
#  endif

static void BeforeFork() {
  // Usually we lock ThreadRegistry, but msan does not have one.
  LockAllocator();
  StackDepotLockBeforeFork();
  ChainedOriginDepotBeforeFork();
}

static void AfterFork(bool fork_child) {
  ChainedOriginDepotAfterFork(fork_child);
  StackDepotUnlockAfterFork(fork_child);
  UnlockAllocator();
  // Usually we unlock ThreadRegistry, but msan does not have one.
}

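// Register fork handlers that keep the allocator and the stack/origin depots
// consistent across fork().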
void InstallAtForkHandler() {
  pthread_atfork(
      &BeforeFork, []() { AfterFork(/* fork_child= */ false); },
      []() { AfterFork(/* fork_child= */ true); });
}

}  // namespace __msan

#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD