1 //===-- memprof_rtl.cpp --------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of MemProfiler, a memory profiler.
10 //
11 // Main file of the MemProf run-time library.
12 //===----------------------------------------------------------------------===//
13
14 #include "memprof_allocator.h"
15 #include "memprof_interceptors.h"
16 #include "memprof_interface_internal.h"
17 #include "memprof_internal.h"
18 #include "memprof_mapping.h"
19 #include "memprof_stack.h"
20 #include "memprof_stats.h"
21 #include "memprof_thread.h"
22 #include "sanitizer_common/sanitizer_atomic.h"
23 #include "sanitizer_common/sanitizer_flags.h"
24 #include "sanitizer_common/sanitizer_interface_internal.h"
25 #include "sanitizer_common/sanitizer_libc.h"
26 #include "sanitizer_common/sanitizer_symbolizer.h"
27
28 #include <time.h>
29
// Weak empty definition; an instrumented binary may provide a strong
// definition to supply default runtime options at link time.
SANITIZER_WEAK_ATTRIBUTE char __memprof_default_options_str[1];

// Dynamic base of the shadow region, published for instrumented code.
uptr __memprof_shadow_memory_dynamic_address; // Global interface symbol.

// Allow the user to specify a profile output file via the binary.
// Weak empty default; checked in MemprofInitInternal before falling back to
// common_flags()->log_path.
SANITIZER_WEAK_ATTRIBUTE char __memprof_profile_filename[1];

// Share ClHistogram compiler flag with runtime.
SANITIZER_WEAK_ATTRIBUTE bool __memprof_histogram;
39
40 namespace __memprof {
41
MemprofDie()42 static void MemprofDie() {
43 static atomic_uint32_t num_calls;
44 if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) {
45 // Don't die twice - run a busy loop.
46 while (1) {
47 internal_sched_yield();
48 }
49 }
50 if (common_flags()->print_module_map >= 1)
51 DumpProcessMap();
52 if (flags()->unmap_shadow_on_exit) {
53 if (kHighShadowEnd)
54 UnmapOrDie((void *)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
55 }
56 }
57
// Handler installed via InstallDeadlySignalHandlers: report the signal to
// stderr, flush the profile, then die.
static void MemprofOnDeadlySignal(int signo, void *siginfo, void *context) {
  // We call StartReportDeadlySignal not HandleDeadlySignal so we get the
  // deadly signal message to stderr but no writing to the profile output file.
  StartReportDeadlySignal();
  // Dump whatever profile data has been accumulated before dying.
  __memprof_profile_dump();
  Die();
}
65
// Callback used by sanitizer_common CHECK failures: unwind and print the
// current stack. GET_STACK_TRACE declares the local `stack` object.
static void CheckUnwind() {
  GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check);
  stack.Print();
}
70
// -------------------------- Globals --------------------- {{{1
int memprof_inited;            // Set to 1 once MemprofInitInternal completes.
bool memprof_init_is_running;  // Guards against recursive initialization.
int memprof_timestamp_inited;  // Set once MemprofInitTime has captured time.
long memprof_init_timestamp_s; // CLOCK_REALTIME seconds at init time.

// Upper bound of the user address space, set by InitializeHighMemEnd.
uptr kHighMemEnd;
78
79 // -------------------------- Run-time entry ------------------- {{{1
80 // exported functions
81
// Bodies shared by the access callbacks below: record a single access,
// either in the plain or the histogram-based profiling mode.
#define MEMPROF_MEMORY_ACCESS_CALLBACK_BODY() __memprof::RecordAccess(addr);
#define MEMPROF_MEMORY_ACCESS_CALLBACK_BODY_HIST()                             \
  __memprof::RecordAccessHistogram(addr);

// Emits the exported entry point __memprof_<type> (type is "load" or
// "store") that the compiler instruments memory accesses with.
#define MEMPROF_MEMORY_ACCESS_CALLBACK(type)                                   \
  extern "C" NOINLINE INTERFACE_ATTRIBUTE void __memprof_##type(uptr addr) {   \
    MEMPROF_MEMORY_ACCESS_CALLBACK_BODY()                                      \
  }

// Same, but emits the histogram-mode entry point __memprof_hist_<type>.
#define MEMPROF_MEMORY_ACCESS_CALLBACK_HIST(type)                              \
  extern "C" NOINLINE INTERFACE_ATTRIBUTE void __memprof_hist_##type(          \
      uptr addr) {                                                             \
    MEMPROF_MEMORY_ACCESS_CALLBACK_BODY_HIST()                                 \
  }

MEMPROF_MEMORY_ACCESS_CALLBACK_HIST(load)
MEMPROF_MEMORY_ACCESS_CALLBACK_HIST(store)

MEMPROF_MEMORY_ACCESS_CALLBACK(load)
MEMPROF_MEMORY_ACCESS_CALLBACK(store)
102
// Force the linker to keep the symbols for various MemProf interface
// functions. We want to keep those in the executable in order to let the
// instrumented dynamic libraries access the symbol even if it is not used by
// the executable itself. This should help if the build system is removing dead
// code at link time.
static NOINLINE void force_interface_symbols() {
  volatile int fake_condition = 0; // prevent dead condition elimination.
  // The switch is never taken at runtime (fake_condition is always 0), but
  // the volatile read keeps the compiler from proving that and discarding
  // the references below.
  // clang-format off
  switch (fake_condition) {
    case 1: __memprof_record_access(nullptr); break;
    case 2: __memprof_record_access_range(nullptr, 0); break;
  }
  // clang-format on
}
117
// atexit hook (registered when flags()->atexit is set): print accumulated
// allocator/runtime statistics on normal process exit.
static void memprof_atexit() {
  Printf("MemProfiler exit stats:\n");
  __memprof_print_accumulated_stats();
}
122
// Computes kHighMemEnd from the platform's maximum user-space address.
static void InitializeHighMemEnd() {
  kHighMemEnd = GetMaxUserVirtualAddress();
  // Increase kHighMemEnd to make sure it's properly
  // aligned together with kHighMemBeg: OR-ing in the low bits rounds the
  // end up to (mmap granularity << SHADOW_SCALE) - 1.
  kHighMemEnd |= (GetMmapGranularity() << SHADOW_SCALE) - 1;
}
129
// Prints the shadow-memory layout (High/Low mem and shadow ranges, the
// shadow gap, and the shadow mapping parameters) for debugging. Regions
// with a zero base (platform-dependent) are skipped.
void PrintAddressSpaceLayout() {
  if (kHighMemBeg) {
    Printf("|| `[%p, %p]` || HighMem ||\n", (void *)kHighMemBeg,
           (void *)kHighMemEnd);
    Printf("|| `[%p, %p]` || HighShadow ||\n", (void *)kHighShadowBeg,
           (void *)kHighShadowEnd);
  }
  Printf("|| `[%p, %p]` || ShadowGap ||\n", (void *)kShadowGapBeg,
         (void *)kShadowGapEnd);
  if (kLowShadowBeg) {
    Printf("|| `[%p, %p]` || LowShadow ||\n", (void *)kLowShadowBeg,
           (void *)kLowShadowEnd);
    Printf("|| `[%p, %p]` || LowMem ||\n", (void *)kLowMemBeg,
           (void *)kLowMemEnd);
  }
  // Where the shadow of the shadow itself lands (should fall in the gap).
  Printf("MemToShadow(shadow): %p %p", (void *)MEM_TO_SHADOW(kLowShadowBeg),
         (void *)MEM_TO_SHADOW(kLowShadowEnd));
  if (kHighMemBeg) {
    Printf(" %p %p", (void *)MEM_TO_SHADOW(kHighShadowBeg),
           (void *)MEM_TO_SHADOW(kHighShadowEnd));
  }
  Printf("\n");
  Printf("malloc_context_size=%zu\n",
         (uptr)common_flags()->malloc_context_size);

  Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE);
  Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY);
  Printf("SHADOW_OFFSET: %p\n", (void *)SHADOW_OFFSET);
  CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7);
}
160
// One-shot runtime initialization. Idempotent: returns immediately once
// memprof_inited is set; CHECKs against re-entrant calls. The ordering of
// the steps below is significant (flags before everything, interceptors
// before malloc replacement, shadow before the allocator, etc.).
static void MemprofInitInternal() {
  if (LIKELY(memprof_inited))
    return;
  SanitizerToolName = "MemProfiler";
  CHECK(!memprof_init_is_running && "MemProf init calls itself!");
  memprof_init_is_running = true;

  CacheBinaryName();

  // Initialize flags. This must be done early, because most of the
  // initialization steps look at flags().
  InitializeFlags();

  AvoidCVE_2016_2143();

  SetMallocContextSize(common_flags()->malloc_context_size);

  InitializeHighMemEnd();

  // Make sure we are not statically linked.
  __interception::DoesNotSupportStaticLinking();

  // Install tool-specific callbacks in sanitizer_common.
  AddDieCallback(MemprofDie);
  SetCheckUnwindCallback(CheckUnwind);

  // Use profile name specified via the binary itself if it exists, and hasn't
  // been overridden by a flag at runtime.
  if (__memprof_profile_filename[0] != 0 && !common_flags()->log_path)
    __sanitizer_set_report_path(__memprof_profile_filename);
  else
    __sanitizer_set_report_path(common_flags()->log_path);

  __sanitizer::InitializePlatformEarly();

  // Setup internal allocator callback.
  SetLowLevelAllocateMinAlignment(SHADOW_GRANULARITY);

  // Interceptors must be in place before malloc is replaced.
  InitializeMemprofInterceptors();
  CheckASLR();

  ReplaceSystemMalloc();

  DisableCoreDumperIfNecessary();

  // Shadow must exist before the allocator starts recording accesses.
  InitializeShadowMemory();

  TSDInit(PlatformTSDDtor);
  InstallDeadlySignalHandlers(MemprofOnDeadlySignal);

  InitializeAllocator();

  if (flags()->atexit)
    Atexit(memprof_atexit);

  InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);

  // Create main thread.
  MemprofThread *main_thread = CreateMainThread();
  CHECK_EQ(0, main_thread->tid());
  force_interface_symbols(); // no-op.

  Symbolizer::LateInitialize();

  VReport(1, "MemProfiler Init done\n");

  memprof_init_is_running = false;
  memprof_inited = 1;
}
230
// Records the wall-clock time (CLOCK_REALTIME seconds) of the first call;
// subsequent calls are no-ops.
void MemprofInitTime() {
  if (LIKELY(memprof_timestamp_inited))
    return;
  timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  memprof_init_timestamp_s = ts.tv_sec;
  memprof_timestamp_inited = 1;
}
239
// Initialize as requested from some part of MemProf runtime library
// (interceptors, allocator, etc).
void MemprofInitFromRtl() { MemprofInitInternal(); }
243
#if MEMPROF_DYNAMIC
// Initialize runtime in case it's LD_PRELOAD-ed into uninstrumented executable
// (and thus normal initializers from .preinit_array or modules haven't run).

// Static-constructor trampoline: its global instance below triggers
// initialization when the dynamic runtime is loaded.
class MemprofInitializer {
public:
  MemprofInitializer() { MemprofInitFromRtl(); }
};

static MemprofInitializer memprof_initializer;
#endif // MEMPROF_DYNAMIC
255
256 } // namespace __memprof
257
258 // ---------------------- Interface ---------------- {{{1
259 using namespace __memprof;
260
261 // Initialize as requested from instrumented application code.
// Initialize as requested from instrumented application code.
// Also captures the init timestamp before running the main initialization.
void __memprof_init() {
  MemprofInitTime();
  MemprofInitInternal();
}
266
// Early-init entry point (e.g. from .preinit_array on platforms using it).
void __memprof_preinit() { MemprofInitInternal(); }

// Kept only as a link-time ABI version check; intentionally empty.
void __memprof_version_mismatch_check_v1() {}

// Record a single access at the given address (plain profiling mode).
void __memprof_record_access(void const volatile *addr) {
  __memprof::RecordAccess((uptr)addr);
}

// Record a single access at the given address (histogram profiling mode).
void __memprof_record_access_hist(void const volatile *addr) {
  __memprof::RecordAccessHistogram((uptr)addr);
}
278
__memprof_record_access_range(void const volatile * addr,uptr size)279 void __memprof_record_access_range(void const volatile *addr, uptr size) {
280 for (uptr a = (uptr)addr; a < (uptr)addr + size; a += kWordSize)
281 __memprof::RecordAccess(a);
282 }
283
__memprof_record_access_range_hist(void const volatile * addr,uptr size)284 void __memprof_record_access_range_hist(void const volatile *addr, uptr size) {
285 for (uptr a = (uptr)addr; a < (uptr)addr + size; a += kWordSize)
286 __memprof::RecordAccessHistogram(a);
287 }
288
// Unaligned load/store helpers exported for instrumented code: each records
// the access with MemProf and then performs the (possibly unaligned) memory
// operation through the uuNN "unaligned" types.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE u16
__sanitizer_unaligned_load16(const uu16 *p) {
  __memprof_record_access(p);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE u32
__sanitizer_unaligned_load32(const uu32 *p) {
  __memprof_record_access(p);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE u64
__sanitizer_unaligned_load64(const uu64 *p) {
  __memprof_record_access(p);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_unaligned_store16(uu16 *p, u16 x) {
  __memprof_record_access(p);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_unaligned_store32(uu32 *p, u32 x) {
  __memprof_record_access(p);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_unaligned_store64(uu64 *p, u64 x) {
  __memprof_record_access(p);
  *p = x;
}
324