//===-- asan_fake_stack.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// FakeStack is used to detect use-after-return bugs.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_poisoning.h"
#include "asan_thread.h"

namespace __asan {

static const u64 kMagic1 = kAsanStackAfterReturnMagic;
static const u64 kMagic2 = (kMagic1 << 8) | kMagic1;
static const u64 kMagic4 = (kMagic2 << 16) | kMagic2;
static const u64 kMagic8 = (kMagic4 << 32) | kMagic4;

static const u64 kAllocaRedzoneSize = 32UL;
static const u64 kAllocaRedzoneMask = 31UL;

// For small size classes inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
  u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
  if (SHADOW_SCALE == 3 && class_id <= 6) {
    // This code expects SHADOW_SCALE=3.
    for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
      shadow[i] = magic;
      // Make sure this does not become memset.
      SanitizerBreakOptimization(nullptr);
    }
  } else {
    // The size class is too big, it's cheaper to poison only size bytes.
    PoisonShadow(ptr, size, static_cast<u8>(magic));
  }
}

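// Illustrative arithmetic for the fast path above (a sketch, not part of the
// runtime; it assumes kMinStackFrameSizeLog == 6, i.e. a 64-byte minimum
// frame): with SHADOW_SCALE == 3 one shadow byte covers 8 frame bytes, so a
// frame of class_id c is (64 << c) bytes, its shadow is (8 << c) bytes, and
// filling that shadow takes (1 << c) u64 stores -- exactly the loop bound
// used in SetShadow():
//
//   frame bytes  = 64 << class_id
//   shadow bytes = frame bytes >> SHADOW_SCALE  // == 8 << class_id
//   u64 stores   = shadow bytes / sizeof(u64)   // == 1 << class_id
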
FakeStack *FakeStack::Create(uptr stack_size_log) {
  static uptr kMinStackSizeLog = 16;
  static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28);
  if (stack_size_log < kMinStackSizeLog)
    stack_size_log = kMinStackSizeLog;
  if (stack_size_log > kMaxStackSizeLog)
    stack_size_log = kMaxStackSizeLog;
  uptr size = RequiredSize(stack_size_log);
  FakeStack *res = reinterpret_cast<FakeStack *>(
      flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack")
                             : MmapOrDie(size, "FakeStack"));
  res->stack_size_log_ = stack_size_log;
  u8 *p = reinterpret_cast<u8 *>(res);
  VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
          "mmapped %zdK, noreserve=%d \n",
          GetCurrentTidOrInvalid(), p,
          p + FakeStack::RequiredSize(stack_size_log), stack_size_log,
          size >> 10, flags()->uar_noreserve);
  return res;
}

void FakeStack::Destroy(int tid) {
  PoisonAll(0);
  if (Verbosity() >= 2) {
    InternalScopedString str;
    for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
      str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
                 NumberOfFrames(stack_size_log(), class_id));
    Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
  }
  uptr size = RequiredSize(stack_size_log_);
  FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size);
  UnmapOrDie(this, size);
}

void FakeStack::PoisonAll(u8 magic) {
  PoisonShadow(reinterpret_cast<uptr>(this), RequiredSize(stack_size_log()),
               magic);
}

#if !defined(_MSC_VER) || defined(__clang__)
ALWAYS_INLINE USED
#endif
FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
                               uptr real_stack) {
  CHECK_LT(class_id, kNumberOfSizeClasses);
  if (needs_gc_)
    GC(real_stack);
  uptr &hint_position = hint_position_[class_id];
  const int num_iter = NumberOfFrames(stack_size_log, class_id);
  u8 *flags = GetFlags(stack_size_log, class_id);
  for (int i = 0; i < num_iter; i++) {
    uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++);
    // This part is tricky. On one hand, checking and setting flags[pos]
    // should be atomic to ensure async-signal safety. But on the other hand,
    // if the signal arrives between checking and setting flags[pos], the
    // signal handler's fake stack will start from a different hint_position
    // and so will not touch this particular byte. So, it is safe to do this
    // with regular non-atomic load and store (at least I was not able to make
    // this code crash).
    if (flags[pos]) continue;
    flags[pos] = 1;
    FakeFrame *res = reinterpret_cast<FakeFrame *>(
        GetFrame(stack_size_log, class_id, pos));
    res->real_stack = real_stack;
    *SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
    return res;
  }
  return nullptr;  // We are out of fake stack.
}

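// Layout sketch for AddrIsInFakeStack() below (derived from its index
// arithmetic rather than a normative description): the frames of all size
// classes form one contiguous region starting at GetFrame(stack_size_log, 0, 0),
// and each class owns a (1 << stack_size_log)-byte slice of it:
//
//   [class 0 frames][class 1 frames] ... [class N-1 frames]
//    ^ beg           ^ beg + (1 << stack_size_log)
//
// Hence class_id = (ptr - beg) >> stack_size_log, and within a class the
// frame index is (ptr - base) >> (kMinStackFrameSizeLog + class_id).
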
uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
  uptr stack_size_log = this->stack_size_log();
  uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
  uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
  if (ptr < beg || ptr >= end) return 0;
  uptr class_id = (ptr - beg) >> stack_size_log;
  uptr base = beg + (class_id << stack_size_log);
  CHECK_LE(base, ptr);
  CHECK_LT(ptr, base + (((uptr)1) << stack_size_log));
  uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
  uptr res = base + pos * BytesInSizeClass(class_id);
  *frame_end = res + BytesInSizeClass(class_id);
  *frame_beg = res + sizeof(FakeFrame);
  return res;
}

void FakeStack::HandleNoReturn() {
  needs_gc_ = true;
}

// When a throw, longjmp or similar event happens, we don't call OnFree() and
// as a result may leak one or more fake frames; the good news is that we are
// notified about all such events by HandleNoReturn().
// If we recently had such a no-return event, we need to collect the garbage
// frames. We do it based on their 'real_stack' values -- everything that is
// lower than the current real_stack is garbage.
NOINLINE void FakeStack::GC(uptr real_stack) {
  uptr collected = 0;
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8 *flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0) continue;  // not allocated.
      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), class_id, i));
      if (ff->real_stack < real_stack) {
        flags[i] = 0;
        collected++;
      }
    }
  }
  needs_gc_ = false;
}

void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8 *flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0) continue;  // not allocated.
      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), class_id, i));
      uptr begin = reinterpret_cast<uptr>(ff);
      callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg);
    }
  }
}

#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
static THREADLOCAL FakeStack *fake_stack_tls;

FakeStack *GetTLSFakeStack() {
  return fake_stack_tls;
}
void SetTLSFakeStack(FakeStack *fs) {
  fake_stack_tls = fs;
}
#else
FakeStack *GetTLSFakeStack() { return 0; }
void SetTLSFakeStack(FakeStack *fs) { }
#endif  // (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA

static FakeStack *GetFakeStack() {
  AsanThread *t = GetCurrentThread();
  if (!t) return nullptr;
  return t->get_or_create_fake_stack();
}

static FakeStack *GetFakeStackFast() {
  if (FakeStack *fs = GetTLSFakeStack())
    return fs;
  if (!__asan_option_detect_stack_use_after_return)
    return nullptr;
  return GetFakeStack();
}

static FakeStack *GetFakeStackFastAlways() {
  if (FakeStack *fs = GetTLSFakeStack())
    return fs;
  return GetFakeStack();
}

static ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
  FakeStack *fs = GetFakeStackFast();
  if (!fs) return 0;
  uptr local_stack;
  uptr real_stack = reinterpret_cast<uptr>(&local_stack);
  FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
  if (!ff) return 0;  // Out of fake stack.
  uptr ptr = reinterpret_cast<uptr>(ff);
  SetShadow(ptr, size, class_id, 0);
  return ptr;
}

static ALWAYS_INLINE uptr OnMallocAlways(uptr class_id, uptr size) {
  FakeStack *fs = GetFakeStackFastAlways();
  if (!fs)
    return 0;
  uptr local_stack;
  uptr real_stack = reinterpret_cast<uptr>(&local_stack);
  FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
  if (!ff)
    return 0;  // Out of fake stack.
  uptr ptr = reinterpret_cast<uptr>(ff);
  SetShadow(ptr, size, class_id, 0);
  return ptr;
}

static ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
  FakeStack::Deallocate(ptr, class_id);
  SetShadow(ptr, size, class_id, kMagic8);
}

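// Illustrative example of the bug this machinery catches (a sketch, not part
// of the runtime): with detect_stack_use_after_return enabled the frame of
// f() lives on the fake stack, OnFree() repoisons its shadow with kMagic8
// (kAsanStackAfterReturnMagic in every byte) when f() returns, and the
// dereference in main() is reported as a stack-use-after-return instead of
// silently reading stale memory:
//
//   int *f() { int x = 0; return &x; }  // x is placed on a fake frame
//   int main() { return *f(); }         // reads the poisoned fake frame
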
}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id)                      \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr                               \
      __asan_stack_malloc_##class_id(uptr size) {                             \
    return OnMalloc(class_id, size);                                          \
  }                                                                           \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr                               \
      __asan_stack_malloc_always_##class_id(uptr size) {                      \
    return OnMallocAlways(class_id, size);                                    \
  }                                                                           \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
      uptr ptr, uptr size) {                                                  \
    OnFree(ptr, class_id, size);                                              \
  }

DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(1)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(2)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(3)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(4)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(5)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(6)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)

extern "C" {
// TODO: remove this method and fix tests that use it by setting
// -asan-use-after-return=never, after modal UAR flag lands
// (https://github.com/google/sanitizers/issues/1394)
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }

SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
                                   void **end) {
  FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
  if (!fs) return nullptr;
  uptr frame_beg, frame_end;
  FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
      reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
  if (!frame) return nullptr;
  if (frame->magic != kCurrentStackFrameMagic)
    return nullptr;
  if (beg) *beg = reinterpret_cast<void*>(frame_beg);
  if (end) *end = reinterpret_cast<void*>(frame_end);
  return reinterpret_cast<void*>(frame->real_stack);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __asan_alloca_poison(uptr addr, uptr size) {
  uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize;
  uptr PartialRzAddr = addr + size;
  uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask;
  uptr PartialRzAligned = PartialRzAddr & ~(SHADOW_GRANULARITY - 1);
  FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic);
  FastPoisonShadowPartialRightRedzone(
      PartialRzAligned, PartialRzAddr % SHADOW_GRANULARITY,
      RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic);
  FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __asan_allocas_unpoison(uptr top, uptr bottom) {
  if ((!top) || (top > bottom)) return;
  REAL(memset)(reinterpret_cast<void*>(MemToShadow(top)), 0,
               (bottom - top) / SHADOW_GRANULARITY);
}
}  // extern "C"
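
// Example of how a debugger or tool might consume the interface above (a
// sketch, not an LLVM-provided snippet; 'addr' is a hypothetical address
// being inspected): map an address inside a fake frame back to the real
// stack pointer recorded when that frame was allocated.
//
//   void *beg, *end;
//   void *fake = __asan_get_current_fake_stack();
//   if (void *real = __asan_addr_is_in_fake_stack(fake, addr, &beg, &end)) {
//     // 'addr' belongs to a live fake frame whose usable range is
//     // [beg, end); 'real' approximates the real stack pointer at the time
//     // the frame was allocated.
//   }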