xref: /freebsd/contrib/llvm-project/compiler-rt/lib/msan/msan_linux.cpp (revision 1db9f3b21e39176dd5b67cf8ac378633b172463e)
1 //===-- msan_linux.cpp ----------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of MemorySanitizer.
10 //
11 // Linux-, NetBSD- and FreeBSD-specific code.
12 //===----------------------------------------------------------------------===//
13 
14 #include "sanitizer_common/sanitizer_platform.h"
15 #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
16 
17 #  include <elf.h>
18 #  include <link.h>
19 #  include <pthread.h>
20 #  include <signal.h>
21 #  include <stdio.h>
22 #  include <stdlib.h>
23 #  include <sys/resource.h>
24 #  include <sys/time.h>
25 #  include <unistd.h>
26 #  include <unwind.h>
27 
28 #  include "msan.h"
29 #  include "msan_allocator.h"
30 #  include "msan_chained_origin_depot.h"
31 #  include "msan_report.h"
32 #  include "msan_thread.h"
33 #  include "sanitizer_common/sanitizer_common.h"
34 #  include "sanitizer_common/sanitizer_procmaps.h"
35 #  include "sanitizer_common/sanitizer_stackdepot.h"
36 
37 namespace __msan {
38 
39 void ReportMapRange(const char *descr, uptr beg, uptr size) {
40   if (size > 0) {
41     uptr end = beg + size - 1;
42     VPrintf(1, "%s : 0x%zx - 0x%zx\n", descr, beg, end);
43   }
44 }
45 
46 static bool CheckMemoryRangeAvailability(uptr beg, uptr size) {
47   if (size > 0) {
48     uptr end = beg + size - 1;
49     if (!MemoryRangeIsAvailable(beg, end)) {
50       Printf("FATAL: Memory range 0x%zx - 0x%zx is not available.\n", beg, end);
51       return false;
52     }
53   }
54   return true;
55 }
56 
// Reserve [beg, beg + size - 1] as an inaccessible mapping labeled |name|.
// Returns false (after printing a fatal message) if the reservation could not
// be placed exactly at |beg|; an empty range trivially succeeds.
static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) {
  if (size > 0) {
    void *addr = MmapFixedNoAccess(beg, size, name);
    if (beg == 0 && addr) {
      // Depending on the kernel configuration, we may not be able to protect
      // the page at address zero. Retry past the first 16 pages.
      // NOTE(review): assumes size > gap for a range starting at zero —
      // otherwise `size -= gap` wraps around; confirm no layout places such a
      // small range at address 0.
      uptr gap = 16 * GetPageSizeCached();
      beg += gap;
      size -= gap;
      addr = MmapFixedNoAccess(beg, size, name);
    }
    // Either mmap failed outright or the kernel placed the mapping somewhere
    // other than the requested fixed address: the layout cannot be enforced.
    if ((uptr)addr != beg) {
      uptr end = beg + size - 1;
      Printf("FATAL: Cannot protect memory range 0x%zx - 0x%zx (%s).\n", beg,
             end, name);
      return false;
    }
  }
  return true;
}
77 
78 static void CheckMemoryLayoutSanity() {
79   uptr prev_end = 0;
80   for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
81     uptr start = kMemoryLayout[i].start;
82     uptr end = kMemoryLayout[i].end;
83     MappingDesc::Type type = kMemoryLayout[i].type;
84     CHECK_LT(start, end);
85     CHECK_EQ(prev_end, start);
86     CHECK(addr_is_type(start, type));
87     CHECK(addr_is_type((start + end) / 2, type));
88     CHECK(addr_is_type(end - 1, type));
89     if (type == MappingDesc::APP) {
90       uptr addr = start;
91       CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
92       CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
93       CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
94 
95       addr = (start + end) / 2;
96       CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
97       CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
98       CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
99 
100       addr = end - 1;
101       CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
102       CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
103       CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
104     }
105     prev_end = end;
106   }
107 }
108 
// Establish the MSan memory layout: map SHADOW regions (and ORIGIN regions
// when |init_origins| is set) and reserve INVALID (and unused ORIGIN) regions
// as inaccessible. APP regions are left untouched. Returns false on any
// failure; callers treat that as fatal.
bool InitShadow(bool init_origins) {
  // Let user know mapping parameters first.
  VPrintf(1, "__msan_init %p\n", reinterpret_cast<void *>(&__msan_init));
  for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
    VPrintf(1, "%s: %zx - %zx\n", kMemoryLayout[i].name, kMemoryLayout[i].start,
            kMemoryLayout[i].end - 1);

  CheckMemoryLayoutSanity();

  // Our own code must live inside an APP region; a non-PIE link can place it
  // outside the supported layout.
  if (!MEM_IS_APP(&__msan_init)) {
    Printf("FATAL: Code %p is out of application range. Non-PIE build?\n",
           reinterpret_cast<void *>(&__msan_init));
    return false;
  }

  const uptr maxVirtualAddress = GetMaxUserVirtualAddress();

  for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
    uptr start = kMemoryLayout[i].start;
    uptr end = kMemoryLayout[i].end;
    uptr size = end - start;
    MappingDesc::Type type = kMemoryLayout[i].type;

    // Check if the segment should be mapped based on platform constraints.
    if (start >= maxVirtualAddress)
      continue;

    // SHADOW is always mapped; ORIGIN only when origin tracking is enabled.
    bool map = type == MappingDesc::SHADOW ||
               (init_origins && type == MappingDesc::ORIGIN);
    // INVALID is always protected; ORIGIN is protected when tracking is off.
    bool protect = type == MappingDesc::INVALID ||
                   (!init_origins && type == MappingDesc::ORIGIN);
    CHECK(!(map && protect));
    if (!map && !protect)
      CHECK(type == MappingDesc::APP);  // only APP segments are left as-is
    if (map) {
      if (!CheckMemoryRangeAvailability(start, size))
        return false;
      if (!MmapFixedSuperNoReserve(start, size, kMemoryLayout[i].name))
        return false;
      // Shadow can be huge; keep it out of core dumps when requested.
      if (common_flags()->use_madv_dontdump)
        DontDumpShadowMemory(start, size);
    }
    if (protect) {
      if (!CheckMemoryRangeAvailability(start, size))
        return false;
      if (!ProtectMemoryRange(start, size, kMemoryLayout[i].name))
        return false;
    }
  }

  return true;
}
161 
162 static void MsanAtExit(void) {
163   if (flags()->print_stats && (flags()->atexit || msan_report_count > 0))
164     ReportStats();
165   if (msan_report_count > 0) {
166     ReportAtExitStatistics();
167     if (common_flags()->exitcode)
168       internal__exit(common_flags()->exitcode);
169   }
170 }
171 
// Register the exit-time reporting hook with the C runtime.
void InstallAtExitHandler() {
  atexit(MsanAtExit);
}
175 
176 // ---------------------- TSD ---------------- {{{1
177 
#if SANITIZER_NETBSD
// Thread Static Data cannot be used in early init on NetBSD.
// Reuse the MSan TSD API for compatibility with existing code
// with an alternative implementation.

// Cleanup callback registered via MsanTSDInit(); run for a thread's
// MsanThread when that thread exits.
static void (*tsd_destructor)(void *tsd) = nullptr;

// RAII holder: its destructor (run at thread exit, since instances are
// thread_local) invokes the registered cleanup on any still-attached
// MsanThread, mirroring a pthread TSD destructor.
struct tsd_key {
  tsd_key() : key(nullptr) {}
  ~tsd_key() {
    CHECK(tsd_destructor);
    if (key)
      (*tsd_destructor)(key);
  }
  MsanThread *key;  // this thread's MsanThread, or null
};

// One slot per thread, destroyed automatically when the thread exits.
static thread_local struct tsd_key key;
196 
// Record the per-thread destructor; must be called exactly once, before any
// other TSD operation.
void MsanTSDInit(void (*destructor)(void *tsd)) {
  CHECK(!tsd_destructor);
  tsd_destructor = destructor;
}
201 
// Returns this thread's MsanThread, or null before SetCurrentThread().
MsanThread *GetCurrentThread() {
  CHECK(tsd_destructor);  // MsanTSDInit must have run
  return key.key;
}
206 
// Bind |tsd| as this thread's MsanThread. Allowed at most once per thread;
// |tsd| must be non-null and TSD must already be initialized.
void SetCurrentThread(MsanThread *tsd) {
  CHECK(tsd_destructor);
  CHECK(tsd);
  CHECK(!key.key);
  key.key = tsd;
}
213 
// Thread-exit cleanup: detach |tsd| from this thread, then destroy it.
void MsanTSDDtor(void *tsd) {
  CHECK(tsd_destructor);
  CHECK_EQ(key.key, tsd);  // must be this thread's own MsanThread
  key.key = nullptr;
  // Make sure that signal handler can not see a stale current thread pointer.
  atomic_signal_fence(memory_order_seq_cst);
  MsanThread::TSDDtor(tsd);
}
222 #else
// pthread TSD key, used only to get a destructor callback at thread exit;
// the hot-path current-thread lookup uses the THREADLOCAL variable below.
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

// Create the pthread key with |destructor|; must be called exactly once,
// before any SetCurrentThread().
void MsanTSDInit(void (*destructor)(void *tsd)) {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
}
231 
// Fast thread-local cache of the current MsanThread (avoids
// pthread_getspecific on hot paths).
static THREADLOCAL MsanThread* msan_current_thread;

// Returns this thread's MsanThread, or null before SetCurrentThread().
MsanThread *GetCurrentThread() {
  return msan_current_thread;
}
237 
238 void SetCurrentThread(MsanThread *t) {
239   // Make sure we do not reset the current MsanThread.
240   CHECK_EQ(0, msan_current_thread);
241   msan_current_thread = t;
242   // Make sure that MsanTSDDtor gets called at the end.
243   CHECK(tsd_key_inited);
244   pthread_setspecific(tsd_key, (void *)t);
245 }
246 
// pthread TSD destructor. pthreads may invoke destructors several passes per
// thread (tracked by destructor_iterations_); until the final pass we re-arm
// the key so later TSD destructors still see a valid current thread, then on
// the last pass we detach and destroy the MsanThread.
void MsanTSDDtor(void *tsd) {
  MsanThread *t = (MsanThread*)tsd;
  if (t->destructor_iterations_ > 1) {
    t->destructor_iterations_--;
    // Re-register so this destructor runs again on the next pass.
    CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
    return;
  }
  msan_current_thread = nullptr;
  // Make sure that signal handler can not see a stale current thread pointer.
  atomic_signal_fence(memory_order_seq_cst);
  MsanThread::TSDDtor(tsd);
}
259 #  endif
260 
// pthread_atfork 'prepare' hook: acquire MSan's global locks so the child
// process does not inherit them held by a thread that won't exist there.
// The acquisition order here must stay the reverse of AfterFork's releases.
static void BeforeFork() {
  // Usually we lock ThreadRegistry, but msan does not have one.
  LockAllocator();
  StackDepotLockBeforeFork();
  ChainedOriginDepotBeforeFork();
}
267 
// pthread_atfork parent/child hook: release the locks taken in BeforeFork in
// reverse order. |fork_child| tells the depots whether they are running in
// the (now single-threaded) child.
static void AfterFork(bool fork_child) {
  ChainedOriginDepotAfterFork(fork_child);
  StackDepotUnlockAfterFork(fork_child);
  UnlockAllocator();
  // Usually we unlock ThreadRegistry, but msan does not have one.
}
274 
275 void InstallAtForkHandler() {
276   pthread_atfork(
277       &BeforeFork, []() { AfterFork(/* fork_child= */ false); },
278       []() { AfterFork(/* fork_child= */ true); });
279 }
280 
281 } // namespace __msan
282 
283 #endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
284