//===-- memprof_rtl.cpp --------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemProfiler, a memory profiler.
//
// Main file of the MemProf run-time library.
//===----------------------------------------------------------------------===//

#include "memprof_allocator.h"
#include "memprof_interceptors.h"
#include "memprof_interface_internal.h"
#include "memprof_internal.h"
#include "memprof_mapping.h"
#include "memprof_stack.h"
#include "memprof_stats.h"
#include "memprof_thread.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

#include <time.h>

uptr __memprof_shadow_memory_dynamic_address; // Global interface symbol.

// Allow the user to specify a profile output file via the binary.
SANITIZER_WEAK_ATTRIBUTE char __memprof_profile_filename[1];

namespace __memprof {

static void MemprofDie() {
  static atomic_uint32_t num_calls;
  if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) {
    // Don't die twice - run a busy loop.
    while (1) {
      internal_sched_yield();
    }
  }
  if (common_flags()->print_module_map >= 1)
    DumpProcessMap();
  if (flags()->unmap_shadow_on_exit) {
    if (kHighShadowEnd)
      UnmapOrDie((void *)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
  }
}

static void CheckUnwind() {
  GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check);
  stack.Print();
}

// -------------------------- Globals --------------------- {{{1
int memprof_inited;
int memprof_init_done;
bool memprof_init_is_running;
int memprof_timestamp_inited;
long memprof_init_timestamp_s;

uptr kHighMemEnd;

// -------------------------- Run-time entry ------------------- {{{1
// exported functions

#define MEMPROF_MEMORY_ACCESS_CALLBACK_BODY() __memprof::RecordAccess(addr);

#define MEMPROF_MEMORY_ACCESS_CALLBACK(type)                                   \
  extern "C" NOINLINE INTERFACE_ATTRIBUTE void __memprof_##type(uptr addr) {   \
    MEMPROF_MEMORY_ACCESS_CALLBACK_BODY()                                      \
  }

MEMPROF_MEMORY_ACCESS_CALLBACK(load)
MEMPROF_MEMORY_ACCESS_CALLBACK(store)

// Force the linker to keep the symbols for various MemProf interface
// functions. We want to keep those in the executable in order to let the
// instrumented dynamic libraries access the symbol even if it is not used by
// the executable itself. This should help if the build system is removing dead
// code at link time.
static NOINLINE void force_interface_symbols() {
  volatile int fake_condition = 0; // prevent dead condition elimination.
  // clang-format off
  switch (fake_condition) {
    case 1: __memprof_record_access(nullptr); break;
    case 2: __memprof_record_access_range(nullptr, 0); break;
  }
  // clang-format on
}

static void memprof_atexit() {
  Printf("MemProfiler exit stats:\n");
  __memprof_print_accumulated_stats();
}

static void InitializeHighMemEnd() {
  kHighMemEnd = GetMaxUserVirtualAddress();
  // Increase kHighMemEnd to make sure it's properly
  // aligned together with kHighMemBeg:
  kHighMemEnd |= (GetMmapGranularity() << SHADOW_SCALE) - 1;
}

void PrintAddressSpaceLayout() {
  if (kHighMemBeg) {
    Printf("|| `[%p, %p]` || HighMem ||\n", (void *)kHighMemBeg,
           (void *)kHighMemEnd);
    Printf("|| `[%p, %p]` || HighShadow ||\n", (void *)kHighShadowBeg,
           (void *)kHighShadowEnd);
  }
  Printf("|| `[%p, %p]` || ShadowGap ||\n", (void *)kShadowGapBeg,
         (void *)kShadowGapEnd);
  if (kLowShadowBeg) {
    Printf("|| `[%p, %p]` || LowShadow ||\n", (void *)kLowShadowBeg,
           (void *)kLowShadowEnd);
    Printf("|| `[%p, %p]` || LowMem ||\n", (void *)kLowMemBeg,
           (void *)kLowMemEnd);
  }
  Printf("MemToShadow(shadow): %p %p", (void *)MEM_TO_SHADOW(kLowShadowBeg),
         (void *)MEM_TO_SHADOW(kLowShadowEnd));
  if (kHighMemBeg) {
    Printf(" %p %p", (void *)MEM_TO_SHADOW(kHighShadowBeg),
           (void *)MEM_TO_SHADOW(kHighShadowEnd));
  }
  Printf("\n");
  Printf("malloc_context_size=%zu\n",
         (uptr)common_flags()->malloc_context_size);

  Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE);
  Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY);
  Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)SHADOW_OFFSET);
  CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7);
}

static void MemprofInitInternal() {
  if (LIKELY(memprof_inited))
    return;
  SanitizerToolName = "MemProfiler";
  CHECK(!memprof_init_is_running && "MemProf init calls itself!");
  memprof_init_is_running = true;

  CacheBinaryName();

  // Initialize flags. This must be done early, because most of the
  // initialization steps look at flags().
  InitializeFlags();

  AvoidCVE_2016_2143();

  SetMallocContextSize(common_flags()->malloc_context_size);

  InitializeHighMemEnd();

  // Make sure we are not statically linked.
  MemprofDoesNotSupportStaticLinkage();

  // Install tool-specific callbacks in sanitizer_common.
  AddDieCallback(MemprofDie);
  SetCheckUnwindCallback(CheckUnwind);

  // Use the profile name specified via the binary itself if it exists, and
  // hasn't been overridden by a flag at runtime.
  if (__memprof_profile_filename[0] != 0 && !common_flags()->log_path)
    __sanitizer_set_report_path(__memprof_profile_filename);
  else
    __sanitizer_set_report_path(common_flags()->log_path);

  __sanitizer::InitializePlatformEarly();

  // Setup internal allocator callback.
  SetLowLevelAllocateMinAlignment(SHADOW_GRANULARITY);

  InitializeMemprofInterceptors();
  CheckASLR();

  ReplaceSystemMalloc();

  DisableCoreDumperIfNecessary();

  InitializeShadowMemory();

  TSDInit(PlatformTSDDtor);

  InitializeAllocator();

  // On Linux MemprofThread::ThreadStart() calls malloc(), so memprof_inited
  // must be set to 1 before the threads are initialized.
  memprof_inited = 1;
  memprof_init_is_running = false;

  if (flags()->atexit)
    Atexit(memprof_atexit);

  InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);

  // interceptors
  InitTlsSize();

  // Create main thread.
  MemprofThread *main_thread = CreateMainThread();
  CHECK_EQ(0, main_thread->tid());
  force_interface_symbols(); // no-op.
  SanitizerInitializeUnwinder();

  Symbolizer::LateInitialize();

  VReport(1, "MemProfiler Init done\n");

  memprof_init_done = 1;
}

void MemprofInitTime() {
  if (LIKELY(memprof_timestamp_inited))
    return;
  timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  memprof_init_timestamp_s = ts.tv_sec;
  memprof_timestamp_inited = 1;
}

// Initialize as requested from some part of MemProf runtime library
// (interceptors, allocator, etc).
void MemprofInitFromRtl() { MemprofInitInternal(); }

#if MEMPROF_DYNAMIC
// Initialize runtime in case it's LD_PRELOAD-ed into uninstrumented executable
// (and thus normal initializers from .preinit_array or modules haven't run).

class MemprofInitializer {
public:
  MemprofInitializer() { MemprofInitFromRtl(); }
};

static MemprofInitializer memprof_initializer;
#endif // MEMPROF_DYNAMIC

} // namespace __memprof

// ---------------------- Interface ---------------- {{{1
using namespace __memprof;

// Initialize as requested from instrumented application code.
void __memprof_init() {
  MemprofInitTime();
  MemprofInitInternal();
}

void __memprof_preinit() { MemprofInitInternal(); }

void __memprof_version_mismatch_check_v1() {}

void __memprof_record_access(void const volatile *addr) {
  __memprof::RecordAccess((uptr)addr);
}

void __memprof_record_access_range(void const volatile *addr, uptr size) {
  for (uptr a = (uptr)addr; a < (uptr)addr + size; a += kWordSize)
    __memprof::RecordAccess(a);
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE u16
__sanitizer_unaligned_load16(const uu16 *p) {
  __memprof_record_access(p);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE u32
__sanitizer_unaligned_load32(const uu32 *p) {
  __memprof_record_access(p);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE u64
__sanitizer_unaligned_load64(const uu64 *p) {
  __memprof_record_access(p);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_unaligned_store16(uu16 *p, u16 x) {
  __memprof_record_access(p);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_unaligned_store32(uu32 *p, u32 x) {
  __memprof_record_access(p);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_unaligned_store64(uu64 *p, u64 x) {
  __memprof_record_access(p);
  *p = x;
}
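
// Illustrative sketch only (not part of this runtime): application code can
// call the recording interface defined above directly, for example to
// attribute accesses made through a buffer the compiler did not instrument.
// The prototypes are assumed to come from the public header
// <sanitizer/memprof_interface.h> (or equivalent extern "C" declarations);
// the function name `touch_buffer` is hypothetical.
//
//   #include <sanitizer/memprof_interface.h>
//   #include <stddef.h>
//
//   void touch_buffer(char *buf, size_t size) {
//     __memprof_record_access(buf);             // one recorded access at buf
//     __memprof_record_access_range(buf, size); // one recorded access per
//                                               // word in [buf, buf + size)
//   }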