//===-- asan_report.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This file contains error reporting code.
//===----------------------------------------------------------------------===//

#include "asan_errors.h"
#include "asan_flags.h"
#include "asan_descriptions.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_scariness_score.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

namespace __asan {

// -------------------- User-specified callbacks ----------------- {{{1
static void (*error_report_callback)(const char*);
static char *error_message_buffer = nullptr;
static uptr error_message_buffer_pos = 0;
static BlockingMutex error_message_buf_mutex(LINKER_INITIALIZED);
static const unsigned kAsanBuggyPcPoolSize = 25;
static __sanitizer::atomic_uintptr_t AsanBuggyPcPool[kAsanBuggyPcPoolSize];

void AppendToErrorMessageBuffer(const char *buffer) {
  BlockingMutexLock l(&error_message_buf_mutex);
  if (!error_message_buffer) {
    error_message_buffer =
        (char*)MmapOrDieQuietly(kErrorMessageBufferSize, __func__);
    error_message_buffer_pos = 0;
  }
  uptr length = internal_strlen(buffer);
  RAW_CHECK(kErrorMessageBufferSize >= error_message_buffer_pos);
  uptr remaining = kErrorMessageBufferSize - error_message_buffer_pos;
  internal_strncpy(error_message_buffer + error_message_buffer_pos,
                   buffer, remaining);
  error_message_buffer[kErrorMessageBufferSize - 1] = '\0';
  // FIXME: reallocate the buffer instead of truncating the message.
  error_message_buffer_pos += Min(remaining, length);
}

// ---------------------- Helper functions ----------------------- {{{1

void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
                     bool in_shadow, const char *after) {
  Decorator d;
  str->append("%s%s%x%x%s%s", before,
              in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4,
              byte & 15, d.Default(), after);
}

static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
                                const char *zone_name) {
  if (zone_ptr) {
    if (zone_name) {
      Printf("malloc_zone_from_ptr(%p) = %p, which is %s\n",
             ptr, zone_ptr, zone_name);
    } else {
      Printf("malloc_zone_from_ptr(%p) = %p, which doesn't have a name\n",
             ptr, zone_ptr);
    }
  } else {
    Printf("malloc_zone_from_ptr(%p) = 0\n", ptr);
  }
}

// ---------------------- Address Descriptions ------------------- {{{1

bool ParseFrameDescription(const char *frame_descr,
                           InternalMmapVector<StackVarDescr> *vars) {
  CHECK(frame_descr);
  const char *p;
  // This string is created by the compiler and has the following form:
  //   "n alloc_1 alloc_2 ... alloc_n"
  // where alloc_i looks like "offset size len ObjectName"
  // or "offset size len ObjectName:line".
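  // For example (hypothetical values, for illustration only), a frame with
  // two locals could be described as
  //   "2 32 4 1 a 48 16 3 buf:7"
  // i.e. two objects: "a" (offset 32, size 4) and "buf" (offset 48, size 16,
  // declared on line 7).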
  uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10);
  if (n_objects == 0)
    return false;

  for (uptr i = 0; i < n_objects; i++) {
    uptr beg = (uptr)internal_simple_strtoll(p, &p, 10);
    uptr size = (uptr)internal_simple_strtoll(p, &p, 10);
    uptr len = (uptr)internal_simple_strtoll(p, &p, 10);
    if (beg == 0 || size == 0 || *p != ' ') {
      return false;
    }
    p++;
    char *colon_pos = internal_strchr(p, ':');
    uptr line = 0;
    uptr name_len = len;
    if (colon_pos != nullptr && colon_pos < p + len) {
      name_len = colon_pos - p;
      line = (uptr)internal_simple_strtoll(colon_pos + 1, nullptr, 10);
    }
    StackVarDescr var = {beg, size, p, name_len, line};
    vars->push_back(var);
    p += len;
  }

  return true;
}

// -------------------- Different kinds of reports ----------------- {{{1

// Use ScopedInErrorReport to run common actions just before and
// immediately after printing an error report.
class ScopedInErrorReport {
 public:
  explicit ScopedInErrorReport(bool fatal = false)
      : halt_on_error_(fatal || flags()->halt_on_error) {
    // Make sure the registry and sanitizer report mutexes are locked while
    // we're printing an error report.
    // We can lock them only here to avoid self-deadlock in case of
    // recursive reports.
    asanThreadRegistry().Lock();
    Printf(
        "=================================================================\n");
  }

  ~ScopedInErrorReport() {
    if (halt_on_error_ && !__sanitizer_acquire_crash_state()) {
      asanThreadRegistry().Unlock();
      return;
    }
    ASAN_ON_ERROR();
    if (current_error_.IsValid()) current_error_.Print();

    // Make sure the current thread is announced.
    DescribeThread(GetCurrentThread());
    // We may want to grab this lock again when printing stats.
    asanThreadRegistry().Unlock();
    // Print memory stats.
    if (flags()->print_stats)
      __asan_print_accumulated_stats();

    if (common_flags()->print_cmdline)
      PrintCmdline();

    if (common_flags()->print_module_map == 2) PrintModuleMap();

    // Copy the message buffer so that we can start logging without holding a
    // lock that gets acquired during printing.
    InternalMmapVector<char> buffer_copy(kErrorMessageBufferSize);
    {
      BlockingMutexLock l(&error_message_buf_mutex);
      internal_memcpy(buffer_copy.data(),
                      error_message_buffer, kErrorMessageBufferSize);
      // Clear error_message_buffer so that if we find other errors
      // we don't re-log this error.
      error_message_buffer_pos = 0;
    }

    LogFullErrorReport(buffer_copy.data());

    if (error_report_callback) {
      error_report_callback(buffer_copy.data());
    }

    if (halt_on_error_ && common_flags()->abort_on_error) {
      // On Android the message is truncated to 512 characters.
      // FIXME: implement "compact" error format, possibly without, or with
      // highly compressed stack traces?
      // FIXME: or just use the summary line as abort message?
      SetAbortMessage(buffer_copy.data());
    }

    // In halt_on_error = false mode, reset the current error object (before
    // unlocking).
    if (!halt_on_error_)
      internal_memset(&current_error_, 0, sizeof(current_error_));

    if (halt_on_error_) {
      Report("ABORTING\n");
      Die();
    }
  }

  void ReportError(const ErrorDescription &description) {
    // Can only report one error per ScopedInErrorReport.
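    // (current_error_ is cleared again in the destructor when halt_on_error_
    // is false, which is what allows subsequent reports in recovery mode.)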
    CHECK_EQ(current_error_.kind, kErrorKindInvalid);
    internal_memcpy(&current_error_, &description, sizeof(current_error_));
  }

  static ErrorDescription &CurrentError() {
    return current_error_;
  }

 private:
  ScopedErrorReportLock error_report_lock_;
  // Error currently being reported. This enables the destructor to interact
  // with the debugger and point it to an error description.
  static ErrorDescription current_error_;
  bool halt_on_error_;
};

ErrorDescription ScopedInErrorReport::current_error_(LINKER_INITIALIZED);

void ReportDeadlySignal(const SignalContext &sig) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorDeadlySignal error(GetCurrentTidOrInvalid(), sig);
  in_report.ReportError(error);
}

void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorDoubleFree error(GetCurrentTidOrInvalid(), free_stack, addr);
  in_report.ReportError(error);
}

void ReportNewDeleteTypeMismatch(uptr addr, uptr delete_size,
                                 uptr delete_alignment,
                                 BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorNewDeleteTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
                                   delete_size, delete_alignment);
  in_report.ReportError(error);
}

void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorFreeNotMalloced error(GetCurrentTidOrInvalid(), free_stack, addr);
  in_report.ReportError(error);
}

void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
                             AllocType alloc_type,
                             AllocType dealloc_type) {
  ScopedInErrorReport in_report;
  ErrorAllocTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
                               alloc_type, dealloc_type);
  in_report.ReportError(error);
}

void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorMallocUsableSizeNotOwned error(GetCurrentTidOrInvalid(), stack, addr);
  in_report.ReportError(error);
}

void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
                                             BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorSanitizerGetAllocatedSizeNotOwned error(GetCurrentTidOrInvalid(), stack,
                                               addr);
  in_report.ReportError(error);
}

void ReportCallocOverflow(uptr count, uptr size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorCallocOverflow error(GetCurrentTidOrInvalid(), stack, count, size);
  in_report.ReportError(error);
}

void ReportReallocArrayOverflow(uptr count, uptr size,
                                BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorReallocArrayOverflow error(GetCurrentTidOrInvalid(), stack, count, size);
  in_report.ReportError(error);
}

void ReportPvallocOverflow(uptr size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorPvallocOverflow error(GetCurrentTidOrInvalid(), stack, size);
  in_report.ReportError(error);
}

void ReportInvalidAllocationAlignment(uptr alignment,
                                      BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidAllocationAlignment error(GetCurrentTidOrInvalid(), stack,
                                        alignment);
  in_report.ReportError(error);
}

void ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
                                        BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidAlignedAllocAlignment error(GetCurrentTidOrInvalid(), stack,
                                          size, alignment);
  in_report.ReportError(error);
}

void ReportInvalidPosixMemalignAlignment(uptr alignment,
                                         BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidPosixMemalignAlignment error(GetCurrentTidOrInvalid(), stack,
                                           alignment);
  in_report.ReportError(error);
}

void ReportAllocationSizeTooBig(uptr user_size, uptr total_size, uptr max_size,
                                BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorAllocationSizeTooBig error(GetCurrentTidOrInvalid(), stack, user_size,
                                  total_size, max_size);
  in_report.ReportError(error);
}

void ReportRssLimitExceeded(BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorRssLimitExceeded error(GetCurrentTidOrInvalid(), stack);
  in_report.ReportError(error);
}

void ReportOutOfMemory(uptr requested_size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorOutOfMemory error(GetCurrentTidOrInvalid(), stack, requested_size);
  in_report.ReportError(error);
}

void ReportStringFunctionMemoryRangesOverlap(const char *function,
                                             const char *offset1, uptr length1,
                                             const char *offset2, uptr length2,
                                             BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorStringFunctionMemoryRangesOverlap error(
      GetCurrentTidOrInvalid(), stack, (uptr)offset1, length1, (uptr)offset2,
      length2, function);
  in_report.ReportError(error);
}

void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
                                      BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorStringFunctionSizeOverflow error(GetCurrentTidOrInvalid(), stack, offset,
                                        size);
  in_report.ReportError(error);
}

void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
                                                  uptr old_mid, uptr new_mid,
                                                  BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorBadParamsToAnnotateContiguousContainer error(
      GetCurrentTidOrInvalid(), stack, beg, end, old_mid, new_mid);
  in_report.ReportError(error);
}

void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
                        const __asan_global *g2, u32 stack_id2) {
  ScopedInErrorReport in_report;
  ErrorODRViolation error(GetCurrentTidOrInvalid(), g1, stack_id1, g2,
                          stack_id2);
  in_report.ReportError(error);
}

// ----------------------- CheckForInvalidPointerPair ----------- {{{1
static NOINLINE void ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp,
                                              uptr a1, uptr a2) {
  ScopedInErrorReport in_report;
  ErrorInvalidPointerPair error(GetCurrentTidOrInvalid(), pc, bp, sp, a1, a2);
  in_report.ReportError(error);
}

static bool IsInvalidPointerPair(uptr a1, uptr a2) {
  if (a1 == a2)
    return false;

  // 256B in shadow memory can be iterated quite fast
  static const uptr kMaxOffset = 2048;

  uptr left = a1 < a2 ? a1 : a2;
  uptr right = a1 < a2 ? a2 : a1;
  uptr offset = right - left;
  if (offset <= kMaxOffset)
    return __asan_region_is_poisoned(left, offset);

  AsanThread *t = GetCurrentThread();

  // check whether left is a stack memory pointer
  if (uptr shadow_offset1 = t->GetStackVariableShadowStart(left)) {
    uptr shadow_offset2 = t->GetStackVariableShadowStart(right);
    return shadow_offset2 == 0 || shadow_offset1 != shadow_offset2;
  }

  // check whether left is a heap memory address
  HeapAddressDescription hdesc1, hdesc2;
  if (GetHeapAddressInformation(left, 0, &hdesc1) &&
      hdesc1.chunk_access.access_type == kAccessTypeInside)
    return !GetHeapAddressInformation(right, 0, &hdesc2) ||
        hdesc2.chunk_access.access_type != kAccessTypeInside ||
        hdesc1.chunk_access.chunk_begin != hdesc2.chunk_access.chunk_begin;

  // check whether left is an address of a global variable
  GlobalAddressDescription gdesc1, gdesc2;
  if (GetGlobalAddressInformation(left, 0, &gdesc1))
    return !GetGlobalAddressInformation(right - 1, 0, &gdesc2) ||
        !gdesc1.PointsInsideTheSameVariable(gdesc2);

  if (t->GetStackVariableShadowStart(right) ||
      GetHeapAddressInformation(right, 0, &hdesc2) ||
      GetGlobalAddressInformation(right - 1, 0, &gdesc2))
    return true;

  // At this point we know nothing about both a1 and a2 addresses.
  return false;
}

static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
  switch (flags()->detect_invalid_pointer_pairs) {
    case 0:
      return;
    case 1:
      if (p1 == nullptr || p2 == nullptr)
        return;
      break;
  }

  uptr a1 = reinterpret_cast<uptr>(p1);
  uptr a2 = reinterpret_cast<uptr>(p2);

  if (IsInvalidPointerPair(a1, a2)) {
    GET_CALLER_PC_BP_SP;
    ReportInvalidPointerPair(pc, bp, sp, a1, a2);
  }
}
// ----------------------- Mac-specific reports ----------------- {{{1

void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
                               BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  Printf("mz_realloc(%p) -- attempting to realloc unallocated memory.\n"
         "This is an unrecoverable problem, exiting now.\n",
         addr);
  PrintZoneForPointer(addr, zone_ptr, zone_name);
  stack->Print();
  DescribeAddressIfHeap(addr);
}

// -------------- SuppressErrorReport -------------- {{{1
// Avoid duplicate error reports in ASan recovery mode.
static bool SuppressErrorReport(uptr pc) {
  if (!common_flags()->suppress_equal_pcs) return false;
  for (unsigned i = 0; i < kAsanBuggyPcPoolSize; i++) {
    uptr cmp = atomic_load_relaxed(&AsanBuggyPcPool[i]);
    if (cmp == 0 && atomic_compare_exchange_strong(&AsanBuggyPcPool[i], &cmp,
                                                   pc, memory_order_relaxed))
      return false;
    if (cmp == pc) return true;
  }
  Die();
}

void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
                        uptr access_size, u32 exp, bool fatal) {
  if (!fatal && SuppressErrorReport(pc)) return;
  ENABLE_FRAME_POINTER;

  // Optimization experiments.
  // The experiments can be used to evaluate potential optimizations that
  // remove instrumentation (assess false negatives). Instead of completely
  // removing some instrumentation, the compiler can emit special calls into
  // the runtime (e.g. __asan_report_exp_load1 instead of __asan_report_load1)
  // and pass a mask of experiments (exp).
  // The reaction to a non-zero value of exp is to be defined.
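  // For now the mask is not acted upon; the cast below only silences the
  // unused-parameter warning.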
  (void)exp;

  ScopedInErrorReport in_report(fatal);
  ErrorGeneric error(GetCurrentTidOrInvalid(), pc, bp, sp, addr, is_write,
                     access_size);
  in_report.ReportError(error);
}

}  // namespace __asan

// --------------------------- Interface --------------------- {{{1
using namespace __asan;

void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
                         uptr access_size, u32 exp) {
  ENABLE_FRAME_POINTER;
  bool fatal = flags()->halt_on_error;
  ReportGenericError(pc, bp, sp, addr, is_write, access_size, exp, fatal);
}

void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
  BlockingMutexLock l(&error_message_buf_mutex);
  error_report_callback = callback;
}

void __asan_describe_address(uptr addr) {
  // Thread registry must be locked while we're describing an address.
  asanThreadRegistry().Lock();
  PrintAddressDescription(addr, 1, "");
  asanThreadRegistry().Unlock();
}

int __asan_report_present() {
  return ScopedInErrorReport::CurrentError().kind != kErrorKindInvalid;
}

uptr __asan_get_report_pc() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.pc;
  return 0;
}

uptr __asan_get_report_bp() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.bp;
  return 0;
}

uptr __asan_get_report_sp() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.sp;
  return 0;
}

uptr __asan_get_report_address() {
  ErrorDescription &err = ScopedInErrorReport::CurrentError();
  if (err.kind == kErrorKindGeneric)
    return err.Generic.addr_description.Address();
  else if (err.kind == kErrorKindDoubleFree)
    return err.DoubleFree.addr_description.addr;
  return 0;
}

int __asan_get_report_access_type() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.is_write;
  return 0;
}

uptr __asan_get_report_access_size() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.access_size;
  return 0;
}

const char *__asan_get_report_description() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.bug_descr;
  return ScopedInErrorReport::CurrentError().Base.scariness.GetDescription();
}

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_ptr_sub(void *a, void *b) {
  CheckForInvalidPointerPair(a, b);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_ptr_cmp(void *a, void *b) {
  CheckForInvalidPointerPair(a, b);
}
}  // extern "C"

// Provide a default implementation of __asan_on_error that does nothing
// and may be overridden by the user.
SANITIZER_INTERFACE_WEAK_DEF(void, __asan_on_error, void) {}
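
// Illustration only (not part of this translation unit): application code can
// hook into the reporting path above via the public interface declared in
// <sanitizer/asan_interface.h>. A minimal sketch:
//
//   #include <sanitizer/asan_interface.h>
//   #include <stdio.h>
//
//   // Invoked via ASAN_ON_ERROR() before the report is printed.
//   extern "C" void __asan_on_error() { fprintf(stderr, "ASan error hook\n"); }
//
//   // Receives the assembled report text (see error_report_callback above).
//   static void OnAsanReport(const char *report) { fputs(report, stderr); }
//
//   int main() {
//     __asan_set_error_report_callback(OnAsanReport);
//     // ...
//   }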