//===-- asan_stats.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Code related to statistics collected by AddressSanitizer.
//===----------------------------------------------------------------------===//
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_stats.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __asan {

AsanStats::AsanStats() {
  Clear();
}

void AsanStats::Clear() {
  CHECK(REAL(memset));
  REAL(memset)(this, 0, sizeof(AsanStats));
}

static void PrintMallocStatsArray(const char *prefix,
                                  uptr (&array)[kNumberOfSizeClasses]) {
  Printf("%s", prefix);
  for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
    if (!array[i]) continue;
    Printf("%zu:%zu; ", i, array[i]);
  }
  Printf("\n");
}

void AsanStats::Print() {
  Printf("Stats: %zuM malloced (%zuM for red zones) by %zu calls\n",
         malloced>>20, malloced_redzones>>20, mallocs);
  Printf("Stats: %zuM realloced by %zu calls\n", realloced>>20, reallocs);
  Printf("Stats: %zuM freed by %zu calls\n", freed>>20, frees);
  Printf("Stats: %zuM really freed by %zu calls\n",
         really_freed>>20, real_frees);
  Printf("Stats: %zuM (%zuM-%zuM) mmaped; %zu maps, %zu unmaps\n",
         (mmaped-munmaped)>>20, mmaped>>20, munmaped>>20,
         mmaps, munmaps);

  PrintMallocStatsArray("  mallocs by size class: ", malloced_by_size);
  Printf("Stats: malloc large: %zu\n", malloc_large);
}

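// Field-by-field merge of two stats objects. This relies on AsanStats being a
// plain struct of uptr counters (see asan_stats.h), so the object can be
// treated as a flat array of uptr.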
void AsanStats::MergeFrom(const AsanStats *stats) {
  uptr *dst_ptr = reinterpret_cast<uptr*>(this);
  const uptr *src_ptr = reinterpret_cast<const uptr*>(stats);
  uptr num_fields = sizeof(*this) / sizeof(uptr);
  for (uptr i = 0; i < num_fields; i++)
    dst_ptr[i] += src_ptr[i];
}

static Mutex print_lock;

static AsanStats unknown_thread_stats(LINKER_INITIALIZED);
static AsanStats dead_threads_stats(LINKER_INITIALIZED);
static Mutex dead_threads_stats_lock;
// Required for malloc_zone_statistics() on OS X. This can't be stored in
// per-thread AsanStats.
static uptr max_malloced_memory;

static void MergeThreadStats(ThreadContextBase *tctx_base, void *arg) {
  AsanStats *accumulated_stats = reinterpret_cast<AsanStats*>(arg);
  AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base);
  if (AsanThread *t = tctx->thread)
    accumulated_stats->MergeFrom(&t->stats());
}

static void GetAccumulatedStats(AsanStats *stats) {
  stats->Clear();
  {
    ThreadRegistryLock l(&asanThreadRegistry());
    asanThreadRegistry()
        .RunCallbackForEachThreadLocked(MergeThreadStats, stats);
  }
  stats->MergeFrom(&unknown_thread_stats);
  {
    Lock lock(&dead_threads_stats_lock);
    stats->MergeFrom(&dead_threads_stats);
  }
  // This is not very accurate: we may miss allocation peaks that happen
  // between two updates of accumulated_stats_. For more accurate bookkeeping
  // the maximum should be updated on every malloc(), which is unacceptable.
  if (max_malloced_memory < stats->malloced) {
    max_malloced_memory = stats->malloced;
  }
}

void FlushToDeadThreadStats(AsanStats *stats) {
  Lock lock(&dead_threads_stats_lock);
  dead_threads_stats.MergeFrom(stats);
  stats->Clear();
}

void FillMallocStatistics(AsanMallocStats *malloc_stats) {
  AsanStats stats;
  GetAccumulatedStats(&stats);
  malloc_stats->blocks_in_use = stats.mallocs;
  malloc_stats->size_in_use = stats.malloced;
  malloc_stats->max_size_in_use = max_malloced_memory;
  malloc_stats->size_allocated = stats.mmaped;
}

AsanStats &GetCurrentThreadStats() {
  AsanThread *t = GetCurrentThread();
  return (t) ? t->stats() : unknown_thread_stats;
}

static void PrintAccumulatedStats() {
  AsanStats stats;
  GetAccumulatedStats(&stats);
  // Use lock to keep reports from mixing up.
  Lock lock(&print_lock);
  stats.Print();
  StackDepotStats stack_depot_stats = StackDepotGetStats();
  Printf("Stats: StackDepot: %zd ids; %zdM allocated\n",
         stack_depot_stats.n_uniq_ids, stack_depot_stats.allocated >> 20);
  PrintInternalAllocatorStats();
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

uptr __sanitizer_get_current_allocated_bytes() {
  AsanStats stats;
  GetAccumulatedStats(&stats);
  uptr malloced = stats.malloced;
  uptr freed = stats.freed;
  // Return sane value if malloced < freed due to racy
  // way we update accumulated stats.
  return (malloced > freed) ? malloced - freed : 0;
}

uptr __sanitizer_get_heap_size() {
  AsanStats stats;
  GetAccumulatedStats(&stats);
  return stats.mmaped - stats.munmaped;
}

uptr __sanitizer_get_free_bytes() {
  AsanStats stats;
  GetAccumulatedStats(&stats);
  uptr total_free = stats.mmaped
                  - stats.munmaped
                  + stats.really_freed;
  uptr total_used = stats.malloced
                  + stats.malloced_redzones;
  // Return sane value if total_free < total_used due to racy
  // way we update accumulated stats.
  return (total_free > total_used) ? total_free - total_used : 0;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 0;
}

void __asan_print_accumulated_stats() {
  PrintAccumulatedStats();
}
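
// Illustrative usage sketch (not compiled into the runtime): a program built
// with ASan can query these counters through the public sanitizer headers,
// e.g.
//   #include <sanitizer/allocator_interface.h>
//   #include <sanitizer/asan_interface.h>
//   size_t live = __sanitizer_get_current_allocated_bytes();
//   size_t heap = __sanitizer_get_heap_size();
//   __asan_print_accumulated_stats();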