//===-- asan_poisoning.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Shadow memory poisoning by ASan RTL and by user application.
//===----------------------------------------------------------------------===//

#include "asan_poisoning.h"

#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"

namespace __asan {

static atomic_uint8_t can_poison_memory;

void SetCanPoisonMemory(bool value) {
  atomic_store(&can_poison_memory, value, memory_order_release);
}

bool CanPoisonMemory() {
  return atomic_load(&can_poison_memory, memory_order_acquire);
}

void PoisonShadow(uptr addr, uptr size, u8 value) {
  if (value && !CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  CHECK(AddrIsInMem(addr + size - ASAN_SHADOW_GRANULARITY));
  CHECK(REAL(memset));
  FastPoisonShadow(addr, size, value);
}

void PoisonShadowPartialRightRedzone(uptr addr,
                                     uptr size,
                                     uptr redzone_size,
                                     u8 value) {
  if (!CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
}

struct ShadowSegmentEndpoint {
  u8 *chunk;
  s8 offset;  // in [0, ASAN_SHADOW_GRANULARITY)
  s8 value;   // = *chunk;

  explicit ShadowSegmentEndpoint(uptr address) {
    chunk = (u8*)MemToShadow(address);
    offset = address & (ASAN_SHADOW_GRANULARITY - 1);
    value = *chunk;
  }
};

void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
  uptr end = ptr + size;
  if (Verbosity()) {
    Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
           poison ? "" : "un", (void *)ptr, (void *)end, size);
    if (Verbosity() >= 2)
      PRINT_CURRENT_STACK();
  }
  CHECK(size);
  CHECK_LE(size, 4096);
  CHECK(IsAligned(end, ASAN_SHADOW_GRANULARITY));
  if (!IsAligned(ptr, ASAN_SHADOW_GRANULARITY)) {
    *(u8 *)MemToShadow(ptr) =
        poison ? static_cast<u8>(ptr % ASAN_SHADOW_GRANULARITY) : 0;
    ptr |= ASAN_SHADOW_GRANULARITY - 1;
    ptr++;
  }
  for (; ptr < end; ptr += ASAN_SHADOW_GRANULARITY)
    *(u8*)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0;
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

// Current implementation of __asan_(un)poison_memory_region doesn't check
// that the user program (un)poisons the memory it owns. It poisons memory
// conservatively, and unpoisons progressively to make sure the asan shadow
// mapping invariant is preserved (see detailed mapping description here:
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm).
//
// * if user asks to poison region [left, right), the program poisons
//   at least [left, AlignDown(right)).
// * if user asks to unpoison region [left, right), the program unpoisons
//   at most [AlignDown(left), right).
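// For illustration (a hedged sketch, not part of the original source):
// assuming the default 8-byte shadow granularity and an 8-byte-aligned
// buffer `buf`,
//   __asan_poison_memory_region(buf + 3, 10);    // poisons at least [buf+3, buf+8)
//   __asan_unpoison_memory_region(buf + 3, 10);  // unpoisons at most [buf, buf+13)
// The exact behavior at the edge granules depends on the shadow values
// already present there, as implemented below.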
void __asan_poison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK_LT(beg.offset, end.offset);
    s8 value = beg.value;
    CHECK_EQ(value, end.value);
    // We can only poison memory if the byte in end.offset is unaddressable.
    // No need to re-poison memory if it is poisoned already.
    if (value > 0 && value <= end.offset) {
      if (beg.offset > 0) {
        *beg.chunk = Min(value, beg.offset);
      } else {
        *beg.chunk = kAsanUserPoisonedMemoryMagic;
      }
    }
    return;
  }
  CHECK_LT(beg.chunk, end.chunk);
  if (beg.offset > 0) {
    // Mark bytes from beg.offset as unaddressable.
    if (beg.value == 0) {
      *beg.chunk = beg.offset;
    } else {
      *beg.chunk = Min(beg.value, beg.offset);
    }
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk);
  // Poison if byte in end.offset is unaddressable.
  if (end.value > 0 && end.value <= end.offset) {
    *end.chunk = kAsanUserPoisonedMemoryMagic;
  }
}

void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK_LT(beg.offset, end.offset);
    s8 value = beg.value;
    CHECK_EQ(value, end.value);
    // We unpoison memory bytes up to end.offset if it is not unpoisoned
    // already.
    if (value != 0) {
      *beg.chunk = Max(value, end.offset);
    }
    return;
  }
  CHECK_LT(beg.chunk, end.chunk);
  if (beg.offset > 0) {
    *beg.chunk = 0;
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
  if (end.offset > 0 && end.value != 0) {
    *end.chunk = Max(end.value, end.offset);
  }
}

int __asan_address_is_poisoned(void const volatile *addr) {
  return __asan::AddressIsPoisoned((uptr)addr);
}

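// Returns 0 if [beg, beg + size) is entirely addressable, and the address of
// the first bad byte otherwise (an address outside the application memory
// range, or the first poisoned byte).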
uptr __asan_region_is_poisoned(uptr beg, uptr size) {
  if (!size)
    return 0;
  uptr end = beg + size;
  if (!AddrIsInMem(beg))
    return beg;
  if (!AddrIsInMem(end))
    return end;
  CHECK_LT(beg, end);
  uptr aligned_b = RoundUpTo(beg, ASAN_SHADOW_GRANULARITY);
  uptr aligned_e = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
  uptr shadow_beg = MemToShadow(aligned_b);
  uptr shadow_end = MemToShadow(aligned_e);
  // First check the first and the last application bytes,
  // then check the ASAN_SHADOW_GRANULARITY-aligned region by calling
  // mem_is_zero on the corresponding shadow.
  if (!__asan::AddressIsPoisoned(beg) && !__asan::AddressIsPoisoned(end - 1) &&
      (shadow_end <= shadow_beg ||
       __sanitizer::mem_is_zero((const char *)shadow_beg,
                                shadow_end - shadow_beg)))
    return 0;
  // The fast check failed, so we have a poisoned byte somewhere.
  // Find it slowly.
  for (; beg < end; beg++)
    if (__asan::AddressIsPoisoned(beg))
      return beg;
  UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found");
  return 0;
}

#define CHECK_SMALL_REGION(p, size, isWrite)                      \
  do {                                                            \
    uptr __p = reinterpret_cast<uptr>(p);                         \
    uptr __size = size;                                           \
    if (UNLIKELY(__asan::AddressIsPoisoned(__p) ||                \
                 __asan::AddressIsPoisoned(__p + __size - 1))) {  \
      GET_CURRENT_PC_BP_SP;                                       \
      uptr __bad = __asan_region_is_poisoned(__p, __size);        \
      __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0); \
    }                                                             \
  } while (false)

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u32 __sanitizer_unaligned_load32(const uu32 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u64 __sanitizer_unaligned_load64(const uu64 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

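// Array cookie handling (background note): in the Itanium C++ ABI, new[]
// typically stores the element count (the "array cookie") in a word just
// before the returned array. The two functions below poison that word so
// that direct accesses to it are reported, while compiler-generated code can
// still read the count back through __asan_load_cxx_array_cookie.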
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_cxx_array_cookie(uptr p) {
  if (SANITIZER_WORDSIZE != 64) return;
  if (!flags()->poison_array_cookie) return;
  uptr s = MEM_TO_SHADOW(p);
  *reinterpret_cast<u8*>(s) = kAsanArrayCookieMagic;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_load_cxx_array_cookie(uptr *p) {
  if (SANITIZER_WORDSIZE != 64) return *p;
  if (!flags()->poison_array_cookie) return *p;
  uptr s = MEM_TO_SHADOW(reinterpret_cast<uptr>(p));
  u8 sval = *reinterpret_cast<u8*>(s);
  if (sval == kAsanArrayCookieMagic) return *p;
  // If sval is not kAsanArrayCookieMagic it can only be freed memory,
  // which means that we are going to get a double-free. So, return 0 to avoid
  // an infinite loop of destructors. We don't want to report a double-free
  // here though, so print a warning just in case.
  // CHECK_EQ(sval, kAsanHeapFreeMagic);
  if (sval == kAsanHeapFreeMagic) {
    Report("AddressSanitizer: loaded array cookie from free-d memory; "
           "expect a double-free report\n");
    return 0;
  }
  // The cookie may remain unpoisoned if e.g. it comes from a custom
  // operator new defined inside a class.
  return *p;
}

// This is a simplified version of __asan_(un)poison_memory_region, which
// assumes that the left border of the region to be poisoned is properly
// aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
  if (size == 0) return;
  uptr aligned_size = size & ~(ASAN_SHADOW_GRANULARITY - 1);
  PoisonShadow(addr, aligned_size,
               do_poison ? kAsanStackUseAfterScopeMagic : 0);
  if (size == aligned_size)
    return;
  s8 end_offset = (s8)(size - aligned_size);
  s8* shadow_end = (s8*)MemToShadow(addr + aligned_size);
  s8 end_value = *shadow_end;
  if (do_poison) {
    // If possible, mark all the bytes mapping to the last shadow byte as
    // unaddressable.
    if (end_value > 0 && end_value <= end_offset)
      *shadow_end = (s8)kAsanStackUseAfterScopeMagic;
  } else {
    // If necessary, mark the first few bytes mapping to the last shadow byte
    // as addressable.
    if (end_value != 0)
      *shadow_end = Max(end_value, end_offset);
  }
}

// These helpers fill a range of shadow memory directly with a fixed byte
// value; addr here is already a shadow address.
void __asan_set_shadow_00(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0, size);
}

void __asan_set_shadow_f1(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf1, size);
}

void __asan_set_shadow_f2(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf2, size);
}

void __asan_set_shadow_f3(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf3, size);
}

void __asan_set_shadow_f5(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf5, size);
}

void __asan_set_shadow_f8(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf8, size);
}

void __asan_poison_stack_memory(uptr addr, uptr size) {
  VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, true);
}

void __asan_unpoison_stack_memory(uptr addr, uptr size) {
  VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, false);
}

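// Illustrative call (a sketch with made-up values, assuming 8-byte-aligned
// storage): a vector-like container with storage [data, data + 16) growing
// from size 5 to size 7 would call
//   __sanitizer_annotate_contiguous_container(data, data + 16,
//                                              data + 5, data + 7);
// after which [data, data + 7) is addressable and accesses to
// [data + 7, data + 16) are reported.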
void __sanitizer_annotate_contiguous_container(const void *beg_p,
                                               const void *end_p,
                                               const void *old_mid_p,
                                               const void *new_mid_p) {
  if (!flags()->detect_container_overflow) return;
  VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
          new_mid_p);
  uptr beg = reinterpret_cast<uptr>(beg_p);
  uptr end = reinterpret_cast<uptr>(end_p);
  uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
  uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
  uptr granularity = ASAN_SHADOW_GRANULARITY;
  if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
        IsAligned(beg, granularity))) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid,
                                                 &stack);
  }
  CHECK_LE(end - beg,
           FIRST_32_SECOND_64(1UL << 30, 1ULL << 40));  // Sanity check.

  uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
  uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
  uptr d1 = RoundDownTo(old_mid, granularity);
  // uptr d2 = RoundUpTo(old_mid, granularity);
  // Currently we should be in this state:
  // [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
  // Make a quick sanity check that we are indeed in this state.
  //
  // FIXME: Two of these three checks are disabled until we fix
  // https://github.com/google/sanitizers/issues/258.
  // if (d1 != d2)
  //   CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
  if (a + granularity <= d1)
    CHECK_EQ(*(u8*)MemToShadow(a), 0);
  // if (d2 + granularity <= c && c <= end)
  //   CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
  //            kAsanContiguousContainerOOBMagic);

  uptr b1 = RoundDownTo(new_mid, granularity);
  uptr b2 = RoundUpTo(new_mid, granularity);
  // New state:
  // [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good.
  PoisonShadow(a, b1 - a, 0);
  PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic);
  if (b1 != b2) {
    CHECK_EQ(b2 - b1, granularity);
    *(u8*)MemToShadow(b1) = static_cast<u8>(new_mid - b1);
  }
}

const void *__sanitizer_contiguous_container_find_bad_address(
    const void *beg_p, const void *mid_p, const void *end_p) {
  if (!flags()->detect_container_overflow)
    return nullptr;
  uptr beg = reinterpret_cast<uptr>(beg_p);
  uptr end = reinterpret_cast<uptr>(end_p);
  uptr mid = reinterpret_cast<uptr>(mid_p);
  CHECK_LE(beg, mid);
  CHECK_LE(mid, end);
  // Check some bytes starting from beg, some bytes around mid, and some bytes
  // ending with end.
  uptr kMaxRangeToCheck = 32;
  uptr r1_beg = beg;
  uptr r1_end = Min(beg + kMaxRangeToCheck, mid);
  uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
  uptr r2_end = Min(end, mid + kMaxRangeToCheck);
  uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
  uptr r3_end = end;
  for (uptr i = r1_beg; i < r1_end; i++)
    if (AddressIsPoisoned(i))
      return reinterpret_cast<const void *>(i);
  for (uptr i = r2_beg; i < mid; i++)
    if (AddressIsPoisoned(i))
      return reinterpret_cast<const void *>(i);
  for (uptr i = mid; i < r2_end; i++)
    if (!AddressIsPoisoned(i))
      return reinterpret_cast<const void *>(i);
  for (uptr i = r3_beg; i < r3_end; i++)
    if (!AddressIsPoisoned(i))
      return reinterpret_cast<const void *>(i);
  return nullptr;
}

int __sanitizer_verify_contiguous_container(const void *beg_p,
                                            const void *mid_p,
                                            const void *end_p) {
  return __sanitizer_contiguous_container_find_bad_address(beg_p, mid_p,
                                                           end_p) == nullptr;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true);
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, false);
}

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool WordIsPoisoned(uptr addr) {
  return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0);
}
}  // namespace __lsan