//===-- asan_report.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This file contains error reporting code.
//===----------------------------------------------------------------------===//

#include "asan_errors.h"
#include "asan_flags.h"
#include "asan_descriptions.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_scariness_score.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

namespace __asan {

// -------------------- User-specified callbacks ----------------- {{{1
static void (*error_report_callback)(const char*);
static char *error_message_buffer = nullptr;
static uptr error_message_buffer_pos = 0;
static BlockingMutex error_message_buf_mutex(LINKER_INITIALIZED);
static const unsigned kAsanBuggyPcPoolSize = 25;
static __sanitizer::atomic_uintptr_t AsanBuggyPcPool[kAsanBuggyPcPoolSize];

void AppendToErrorMessageBuffer(const char *buffer) {
  BlockingMutexLock l(&error_message_buf_mutex);
  if (!error_message_buffer) {
    error_message_buffer =
        (char*)MmapOrDieQuietly(kErrorMessageBufferSize, __func__);
    error_message_buffer_pos = 0;
  }
  uptr length = internal_strlen(buffer);
  RAW_CHECK(kErrorMessageBufferSize >= error_message_buffer_pos);
  uptr remaining = kErrorMessageBufferSize - error_message_buffer_pos;
  internal_strncpy(error_message_buffer + error_message_buffer_pos,
                   buffer, remaining);
  error_message_buffer[kErrorMessageBufferSize - 1] = '\0';
  // FIXME: reallocate the buffer instead of truncating the message.
  error_message_buffer_pos += Min(remaining, length);
}

// ---------------------- Helper functions ----------------------- {{{1

void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
                     bool in_shadow, const char *after) {
  Decorator d;
  str->append("%s%s%x%x%s%s", before,
              in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4,
              byte & 15, d.Default(), after);
}

static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
                                const char *zone_name) {
  if (zone_ptr) {
    if (zone_name) {
      Printf("malloc_zone_from_ptr(%p) = %p, which is %s\n",
             ptr, zone_ptr, zone_name);
    } else {
      Printf("malloc_zone_from_ptr(%p) = %p, which doesn't have a name\n",
             ptr, zone_ptr);
    }
  } else {
    Printf("malloc_zone_from_ptr(%p) = 0\n", ptr);
  }
}

// ---------------------- Address Descriptions ------------------- {{{1

bool ParseFrameDescription(const char *frame_descr,
                           InternalMmapVector<StackVarDescr> *vars) {
  CHECK(frame_descr);
  const char *p;
  // This string is created by the compiler and has the following form:
  // "n alloc_1 alloc_2 ... alloc_n"
  // where alloc_i looks like "offset size len ObjectName"
  // or "offset size len ObjectName:line".
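  // For example, a frame with two locals might be described as
  //   "2 32 4 1 a 48 16 3 buf"
  // i.e. two objects: "a" (4 bytes at frame offset 32) and "buf" (16 bytes at
  // frame offset 48). The concrete numbers here are purely illustrative.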
  uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10);
  if (n_objects == 0)
    return false;

  for (uptr i = 0; i < n_objects; i++) {
    uptr beg = (uptr)internal_simple_strtoll(p, &p, 10);
    uptr size = (uptr)internal_simple_strtoll(p, &p, 10);
    uptr len = (uptr)internal_simple_strtoll(p, &p, 10);
    if (beg == 0 || size == 0 || *p != ' ') {
      return false;
    }
    p++;
    char *colon_pos = internal_strchr(p, ':');
    uptr line = 0;
    uptr name_len = len;
    if (colon_pos != nullptr && colon_pos < p + len) {
      name_len = colon_pos - p;
      line = (uptr)internal_simple_strtoll(colon_pos + 1, nullptr, 10);
    }
    StackVarDescr var = {beg, size, p, name_len, line};
    vars->push_back(var);
    p += len;
  }

  return true;
}

// -------------------- Different kinds of reports ----------------- {{{1

// Use ScopedInErrorReport to run common actions just before and
// immediately after printing an error report.
class ScopedInErrorReport {
 public:
  explicit ScopedInErrorReport(bool fatal = false)
      : halt_on_error_(fatal || flags()->halt_on_error) {
    // Make sure the registry and sanitizer report mutexes are locked while
    // we're printing an error report.
    // We can lock them only here to avoid self-deadlock in case of
    // recursive reports.
    asanThreadRegistry().Lock();
    Printf(
        "=================================================================\n");
  }

  ~ScopedInErrorReport() {
    if (halt_on_error_ && !__sanitizer_acquire_crash_state()) {
      asanThreadRegistry().Unlock();
      return;
    }
    ASAN_ON_ERROR();
    if (current_error_.IsValid()) current_error_.Print();

    // Make sure the current thread is announced.
    DescribeThread(GetCurrentThread());
    // We may want to grab this lock again when printing stats.
    asanThreadRegistry().Unlock();
    // Print memory stats.
    if (flags()->print_stats)
      __asan_print_accumulated_stats();

    if (common_flags()->print_cmdline)
      PrintCmdline();

    if (common_flags()->print_module_map == 2)
      DumpProcessMap();

    // Copy the message buffer so that we can start logging without holding a
    // lock that gets acquired during printing.
    InternalMmapVector<char> buffer_copy(kErrorMessageBufferSize);
    {
      BlockingMutexLock l(&error_message_buf_mutex);
      internal_memcpy(buffer_copy.data(),
                      error_message_buffer, kErrorMessageBufferSize);
      // Clear error_message_buffer so that if we find other errors
      // we don't re-log this error.
      error_message_buffer_pos = 0;
    }

    LogFullErrorReport(buffer_copy.data());

    if (error_report_callback) {
      error_report_callback(buffer_copy.data());
    }

    if (halt_on_error_ && common_flags()->abort_on_error) {
      // On Android the message is truncated to 512 characters.
      // FIXME: implement "compact" error format, possibly without, or with
      // highly compressed stack traces?
      // FIXME: or just use the summary line as abort message?
      SetAbortMessage(buffer_copy.data());
    }

    // In halt_on_error = false mode, reset the current error object (before
    // unlocking).
    if (!halt_on_error_)
      internal_memset(&current_error_, 0, sizeof(current_error_));

    if (halt_on_error_) {
      Report("ABORTING\n");
      Die();
    }
  }

  void ReportError(const ErrorDescription &description) {
    // Can only report one error per ScopedInErrorReport.
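    // current_error_ starts out as kErrorKindInvalid (it is only
    // linker-initialized) and is reset by the destructor in recoverable mode,
    // so this check fires if two errors are recorded within one report scope.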
    CHECK_EQ(current_error_.kind, kErrorKindInvalid);
    internal_memcpy(&current_error_, &description, sizeof(current_error_));
  }

  static ErrorDescription &CurrentError() {
    return current_error_;
  }

 private:
  ScopedErrorReportLock error_report_lock_;
  // Error currently being reported. This enables the destructor to interact
  // with the debugger and point it to an error description.
  static ErrorDescription current_error_;
  bool halt_on_error_;
};

ErrorDescription ScopedInErrorReport::current_error_(LINKER_INITIALIZED);

void ReportDeadlySignal(const SignalContext &sig) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorDeadlySignal error(GetCurrentTidOrInvalid(), sig);
  in_report.ReportError(error);
}

void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorDoubleFree error(GetCurrentTidOrInvalid(), free_stack, addr);
  in_report.ReportError(error);
}

void ReportNewDeleteTypeMismatch(uptr addr, uptr delete_size,
                                 uptr delete_alignment,
                                 BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorNewDeleteTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
                                   delete_size, delete_alignment);
  in_report.ReportError(error);
}

void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorFreeNotMalloced error(GetCurrentTidOrInvalid(), free_stack, addr);
  in_report.ReportError(error);
}

void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
                             AllocType alloc_type,
                             AllocType dealloc_type) {
  ScopedInErrorReport in_report;
  ErrorAllocTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
                               alloc_type, dealloc_type);
  in_report.ReportError(error);
}

void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorMallocUsableSizeNotOwned error(GetCurrentTidOrInvalid(), stack, addr);
  in_report.ReportError(error);
}

void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
                                             BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorSanitizerGetAllocatedSizeNotOwned error(GetCurrentTidOrInvalid(), stack,
                                               addr);
  in_report.ReportError(error);
}

void ReportCallocOverflow(uptr count, uptr size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorCallocOverflow error(GetCurrentTidOrInvalid(), stack, count, size);
  in_report.ReportError(error);
}

void ReportReallocArrayOverflow(uptr count, uptr size,
                                BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorReallocArrayOverflow error(GetCurrentTidOrInvalid(), stack, count, size);
  in_report.ReportError(error);
}

void ReportPvallocOverflow(uptr size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorPvallocOverflow error(GetCurrentTidOrInvalid(), stack, size);
  in_report.ReportError(error);
}

void ReportInvalidAllocationAlignment(uptr alignment,
                                      BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidAllocationAlignment error(GetCurrentTidOrInvalid(), stack,
                                        alignment);
  in_report.ReportError(error);
}

void ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
                                        BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidAlignedAllocAlignment error(GetCurrentTidOrInvalid(), stack,
                                          size, alignment);
  in_report.ReportError(error);
}

void ReportInvalidPosixMemalignAlignment(uptr alignment,
                                         BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidPosixMemalignAlignment error(GetCurrentTidOrInvalid(), stack,
                                           alignment);
  in_report.ReportError(error);
}

void ReportAllocationSizeTooBig(uptr user_size, uptr total_size, uptr max_size,
                                BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorAllocationSizeTooBig error(GetCurrentTidOrInvalid(), stack, user_size,
                                  total_size, max_size);
  in_report.ReportError(error);
}

void ReportRssLimitExceeded(BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorRssLimitExceeded error(GetCurrentTidOrInvalid(), stack);
  in_report.ReportError(error);
}

void ReportOutOfMemory(uptr requested_size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorOutOfMemory error(GetCurrentTidOrInvalid(), stack, requested_size);
  in_report.ReportError(error);
}

void ReportStringFunctionMemoryRangesOverlap(const char *function,
                                             const char *offset1, uptr length1,
                                             const char *offset2, uptr length2,
                                             BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorStringFunctionMemoryRangesOverlap error(
      GetCurrentTidOrInvalid(), stack, (uptr)offset1, length1, (uptr)offset2,
      length2, function);
  in_report.ReportError(error);
}

void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
                                      BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorStringFunctionSizeOverflow error(GetCurrentTidOrInvalid(), stack, offset,
                                        size);
  in_report.ReportError(error);
}

void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
                                                  uptr old_mid, uptr new_mid,
                                                  BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorBadParamsToAnnotateContiguousContainer error(
      GetCurrentTidOrInvalid(), stack, beg, end, old_mid, new_mid);
  in_report.ReportError(error);
}

void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
                        const __asan_global *g2, u32 stack_id2) {
  ScopedInErrorReport in_report;
  ErrorODRViolation error(GetCurrentTidOrInvalid(), g1, stack_id1, g2,
                          stack_id2);
  in_report.ReportError(error);
}

// ----------------------- CheckForInvalidPointerPair ----------- {{{1
static NOINLINE void ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp,
                                              uptr a1, uptr a2) {
  ScopedInErrorReport in_report;
  ErrorInvalidPointerPair error(GetCurrentTidOrInvalid(), pc, bp, sp, a1, a2);
  in_report.ReportError(error);
}

static bool IsInvalidPointerPair(uptr a1, uptr a2) {
  if (a1 == a2)
    return false;

  // 256B in shadow memory can be iterated quite fast
  static const uptr kMaxOffset = 2048;

  uptr left = a1 < a2 ? a1 : a2;
  uptr right = a1 < a2 ? a2 : a1;
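  // With the default 8:1 shadow mapping (SHADOW_GRANULARITY == 8), kMaxOffset
  // application bytes correspond to 256 shadow bytes, so for nearby pointers
  // it is cheap to simply ask whether the whole range between them is
  // poisoned.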
  uptr offset = right - left;
  if (offset <= kMaxOffset)
    return __asan_region_is_poisoned(left, offset);

  AsanThread *t = GetCurrentThread();

  // Check whether left points into stack memory.
  if (uptr shadow_offset1 = t->GetStackVariableShadowStart(left)) {
    uptr shadow_offset2 = t->GetStackVariableShadowStart(right);
    return shadow_offset2 == 0 || shadow_offset1 != shadow_offset2;
  }

  // Check whether left points into heap memory.
  HeapAddressDescription hdesc1, hdesc2;
  if (GetHeapAddressInformation(left, 0, &hdesc1) &&
      hdesc1.chunk_access.access_type == kAccessTypeInside)
    return !GetHeapAddressInformation(right, 0, &hdesc2) ||
           hdesc2.chunk_access.access_type != kAccessTypeInside ||
           hdesc1.chunk_access.chunk_begin != hdesc2.chunk_access.chunk_begin;

  // Check whether left points into a global variable.
  GlobalAddressDescription gdesc1, gdesc2;
  if (GetGlobalAddressInformation(left, 0, &gdesc1))
    return !GetGlobalAddressInformation(right - 1, 0, &gdesc2) ||
           !gdesc1.PointsInsideTheSameVariable(gdesc2);

  if (t->GetStackVariableShadowStart(right) ||
      GetHeapAddressInformation(right, 0, &hdesc2) ||
      GetGlobalAddressInformation(right - 1, 0, &gdesc2))
    return true;

  // At this point we know nothing about either a1 or a2.
  return false;
}

static inline void CheckForInvalidPointerPair(void *p1, void *p2) {
  switch (flags()->detect_invalid_pointer_pairs) {
    case 0:
      return;
    case 1:
      if (p1 == nullptr || p2 == nullptr)
        return;
      break;
  }

  uptr a1 = reinterpret_cast<uptr>(p1);
  uptr a2 = reinterpret_cast<uptr>(p2);

  if (IsInvalidPointerPair(a1, a2)) {
    GET_CALLER_PC_BP_SP;
    ReportInvalidPointerPair(pc, bp, sp, a1, a2);
  }
}
// ----------------------- Mac-specific reports ----------------- {{{1

void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
                               BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  Printf("mz_realloc(%p) -- attempting to realloc unallocated memory.\n"
         "This is an unrecoverable problem, exiting now.\n",
         addr);
  PrintZoneForPointer(addr, zone_ptr, zone_name);
  stack->Print();
  DescribeAddressIfHeap(addr);
}

// -------------- SuppressErrorReport -------------- {{{1
// Avoid duplicate error reports in ASan recovery mode.
static bool SuppressErrorReport(uptr pc) {
  if (!common_flags()->suppress_equal_pcs) return false;
  for (unsigned i = 0; i < kAsanBuggyPcPoolSize; i++) {
    uptr cmp = atomic_load_relaxed(&AsanBuggyPcPool[i]);
    if (cmp == 0 && atomic_compare_exchange_strong(&AsanBuggyPcPool[i], &cmp,
                                                   pc, memory_order_relaxed))
      return false;
    if (cmp == pc) return true;
  }
  Die();
}

void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
                        uptr access_size, u32 exp, bool fatal) {
  if (!fatal && SuppressErrorReport(pc)) return;
  ENABLE_FRAME_POINTER;

  // Optimization experiments.
  // The experiments can be used to evaluate potential optimizations that
  // remove instrumentation (assess false negatives). Instead of completely
  // removing some instrumentation, the compiler can emit special calls into
  // the runtime (e.g. __asan_report_exp_load1 instead of __asan_report_load1)
  // and pass a mask of experiments (exp).
  // The reaction to a non-zero value of exp is to be defined.
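  // For now the experiment mask is simply ignored; the cast below only keeps
  // the parameter referenced.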
  (void)exp;

  ScopedInErrorReport in_report(fatal);
  ErrorGeneric error(GetCurrentTidOrInvalid(), pc, bp, sp, addr, is_write,
                     access_size);
  in_report.ReportError(error);
}

} // namespace __asan

// --------------------------- Interface --------------------- {{{1
using namespace __asan;

void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
                         uptr access_size, u32 exp) {
  ENABLE_FRAME_POINTER;
  bool fatal = flags()->halt_on_error;
  ReportGenericError(pc, bp, sp, addr, is_write, access_size, exp, fatal);
}

void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
  BlockingMutexLock l(&error_message_buf_mutex);
  error_report_callback = callback;
}

void __asan_describe_address(uptr addr) {
  // Thread registry must be locked while we're describing an address.
  asanThreadRegistry().Lock();
  PrintAddressDescription(addr, 1, "");
  asanThreadRegistry().Unlock();
}

int __asan_report_present() {
  return ScopedInErrorReport::CurrentError().kind != kErrorKindInvalid;
}

uptr __asan_get_report_pc() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.pc;
  return 0;
}

uptr __asan_get_report_bp() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.bp;
  return 0;
}

uptr __asan_get_report_sp() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.sp;
  return 0;
}

uptr __asan_get_report_address() {
  ErrorDescription &err = ScopedInErrorReport::CurrentError();
  if (err.kind == kErrorKindGeneric)
    return err.Generic.addr_description.Address();
  else if (err.kind == kErrorKindDoubleFree)
    return err.DoubleFree.addr_description.addr;
  return 0;
}

int __asan_get_report_access_type() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.is_write;
  return 0;
}

uptr __asan_get_report_access_size() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.access_size;
  return 0;
}

const char *__asan_get_report_description() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.bug_descr;
  return ScopedInErrorReport::CurrentError().Base.scariness.GetDescription();
}

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_ptr_sub(void *a, void *b) {
  CheckForInvalidPointerPair(a, b);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_ptr_cmp(void *a, void *b) {
  CheckForInvalidPointerPair(a, b);
}
} // extern "C"

// Provide a default implementation of __asan_on_error that does nothing
// and may be overridden by the user.
SANITIZER_INTERFACE_WEAK_DEF(void, __asan_on_error, void) {}
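
// Illustrative user-side usage (not part of the runtime): the weak hook above
// can be overridden in application code, e.g.
//
//   extern "C" void __asan_on_error() {
//     // Invoked when ASan starts reporting an error; e.g. flush logs here.
//   }
//
// Alternatively, __asan_set_error_report_callback() delivers the full report
// text to a user-provided callback.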