//===-- hwasan_report.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Error reporting.
//===----------------------------------------------------------------------===//

#include "hwasan_report.h"

#include <dlfcn.h>

#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_globals.h"
#include "hwasan_mapping.h"
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_array_ref.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace_printer.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

using namespace __sanitizer;

namespace __hwasan {

class ScopedReport {
 public:
  ScopedReport(bool fatal = false) : error_message_(1), fatal(fatal) {
    Lock lock(&error_message_lock_);
    error_message_ptr_ = fatal ? &error_message_ : nullptr;
    ++hwasan_report_count;
  }

  ~ScopedReport() {
    void (*report_cb)(const char *);
    {
      Lock lock(&error_message_lock_);
      report_cb = error_report_callback_;
      error_message_ptr_ = nullptr;
    }
    if (report_cb)
      report_cb(error_message_.data());
    if (fatal)
      SetAbortMessage(error_message_.data());
    if (common_flags()->print_module_map >= 2 ||
        (fatal && common_flags()->print_module_map))
      DumpProcessMap();
    if (fatal)
      Die();
  }

  static void MaybeAppendToErrorMessage(const char *msg) {
    Lock lock(&error_message_lock_);
    if (!error_message_ptr_)
      return;
    uptr len = internal_strlen(msg);
    uptr old_size = error_message_ptr_->size();
    error_message_ptr_->resize(old_size + len);
    // Overwrite the old trailing '\0', keep the new trailing '\0' untouched.
    internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len);
  }

  static void SetErrorReportCallback(void (*callback)(const char *)) {
    Lock lock(&error_message_lock_);
    error_report_callback_ = callback;
  }

 private:
  ScopedErrorReportLock error_report_lock_;
  InternalMmapVector<char> error_message_;
  bool fatal;

  static InternalMmapVector<char> *error_message_ptr_;
  static Mutex error_message_lock_;
  static void (*error_report_callback_)(const char *);
};

InternalMmapVector<char> *ScopedReport::error_message_ptr_;
Mutex ScopedReport::error_message_lock_;
void (*ScopedReport::error_report_callback_)(const char *);
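// The reporting functions below hold a ScopedReport for the duration of the
// report. For fatal reports, Printf output is also accumulated into
// error_message_ (via AppendToErrorMessageBuffer below); the destructor then
// passes the accumulated text to the callback registered through
// SetErrorReportCallback before dying.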
// If there is an active ScopedReport, append to its error message.
void AppendToErrorMessageBuffer(const char *buffer) {
  ScopedReport::MaybeAppendToErrorMessage(buffer);
}

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

static void MaybePrintAndroidHelpUrl() {
#if SANITIZER_ANDROID
  Printf(
      "Learn more about HWASan reports: "
      "https://source.android.com/docs/security/test/memory-safety/"
      "hwasan-reports\n");
#endif
}

// A RAII object that holds a copy of the current thread stack ring buffer.
// The actual stack buffer may change while we are iterating over it (for
// example, Printf may call syslog() which can itself be built with hwasan).
class SavedStackAllocations {
 public:
  SavedStackAllocations(StackAllocationsRingBuffer *rb) {
    uptr size = rb->size() * sizeof(uptr);
    void *storage =
        MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
    new (&rb_) StackAllocationsRingBuffer(*rb, storage);
  }

  ~SavedStackAllocations() {
    StackAllocationsRingBuffer *rb = get();
    UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
  }

  StackAllocationsRingBuffer *get() {
    return (StackAllocationsRingBuffer *)&rb_;
  }

 private:
  uptr rb_;
};

class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Access() { return Blue(); }
  const char *Allocation() const { return Magenta(); }
  const char *Origin() const { return Magenta(); }
  const char *Name() const { return Green(); }
  const char *Location() { return Green(); }
  const char *Thread() { return Green(); }
};

static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
                               HeapAllocationRecord *har, uptr *ring_index,
                               uptr *num_matching_addrs,
                               uptr *num_matching_addrs_4b) {
  if (!rb) return false;

  *num_matching_addrs = 0;
  *num_matching_addrs_4b = 0;
  for (uptr i = 0, size = rb->size(); i < size; i++) {
    auto h = (*rb)[i];
    if (h.tagged_addr <= tagged_addr &&
        h.tagged_addr + h.requested_size > tagged_addr) {
      *har = h;
      *ring_index = i;
      return true;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we had only one entry per address (e.g. if the ring buffer data was
    // stored at the address itself). This will help us tune the allocator
    // implementation for MTE.
    if (UntagAddr(h.tagged_addr) <= UntagAddr(tagged_addr) &&
        UntagAddr(h.tagged_addr) + h.requested_size > UntagAddr(tagged_addr)) {
      ++*num_matching_addrs;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we only had 4 tag bits, which is the case for MTE.
    auto untag_4b = [](uptr p) { return p & ((1ULL << 60) - 1); };
    if (untag_4b(h.tagged_addr) <= untag_4b(tagged_addr) &&
        untag_4b(h.tagged_addr) + h.requested_size > untag_4b(tagged_addr)) {
      ++*num_matching_addrs_4b;
    }
  }
  return false;
}

static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
                                  tag_t addr_tag, uptr untagged_addr) {
  uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
  bool found_local = false;
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    tag_t base_tag =
        reinterpret_cast<uptr>(record_addr) >> kRecordAddrBaseTagShift;
    uptr fp = (record >> kRecordFPShift) << kRecordFPLShift;
    uptr pc_mask = (1ULL << kRecordFPShift) - 1;
    uptr pc = record & pc_mask;
    FrameInfo frame;
    if (Symbolizer::GetOrInit()->SymbolizeFrame(pc, &frame)) {
      for (LocalInfo &local : frame.locals) {
        if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset)
          continue;
        tag_t obj_tag = base_tag ^ local.tag_offset;
        if (obj_tag != addr_tag)
          continue;
        // Calculate the offset from the object address to the faulting
        // address. Because we only store bits 4-19 of FP (bits 0-3 are
        // guaranteed to be zero), the calculation is performed mod 2^20 and
        // may harmlessly underflow if the address mod 2^20 is below the
        // object address.
        uptr obj_offset =
            (untagged_addr - fp - local.frame_offset) & (kRecordFPModulus - 1);
        if (obj_offset >= local.size)
          continue;
        if (!found_local) {
          Printf("Potentially referenced stack objects:\n");
          found_local = true;
        }
        Printf("  %s in %s %s:%d\n", local.name, local.function_name,
               local.decl_file, local.decl_line);
      }
      frame.Clear();
    }
  }

  if (found_local)
    return;

  // We didn't find any locals. Most likely we don't have symbols, so dump
  // the information that we have for offline analysis.
  InternalScopedString frame_desc;
  Printf("Previously allocated frames:\n");
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    uptr pc_mask = (1ULL << kRecordFPShift) - 1;
    uptr pc = record & pc_mask;
    frame_desc.append("  record_addr:0x%zx record:0x%zx",
                      reinterpret_cast<uptr>(record_addr), record);
    if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) {
      RenderFrame(&frame_desc, " %F %L", 0, frame->info.address, &frame->info,
                  common_flags()->symbolize_vs_style,
                  common_flags()->strip_path_prefix);
      frame->ClearAll();
    }
    Printf("%s\n", frame_desc.data());
    frame_desc.clear();
  }
}

// Returns true if tag == *tag_ptr, reading tags from short granules if
// necessary. This may return a false positive if tags 1-15 are used as a
// regular tag rather than a short granule marker.
static bool TagsEqual(tag_t tag, tag_t *tag_ptr) {
  if (tag == *tag_ptr)
    return true;
  if (*tag_ptr == 0 || *tag_ptr > kShadowAlignment - 1)
    return false;
  uptr mem = ShadowToMem(reinterpret_cast<uptr>(tag_ptr));
  tag_t inline_tag = *reinterpret_cast<tag_t *>(mem + kShadowAlignment - 1);
  return tag == inline_tag;
}
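// For example (short granule encoding, per the design doc linked from
// PrintTagsAroundAddr below): a 20-byte allocation tagged 0xab covers one
// full 16-byte granule and 4 bytes of a second one. The shadow of the first
// granule holds 0xab; the shadow of the second holds 4 (the number of valid
// bytes), and the real tag 0xab is stashed in the last byte of the granule
// itself - the inline_tag that TagsEqual reads above.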
// HWASan globals store the size of the global in the descriptor. In cases
// where we don't have a binary with symbols, we can't grab the size of the
// global from the debug info - but we might be able to retrieve it from the
// descriptor. Returns zero if the lookup failed.
static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
  // Find the ELF object that this global resides in.
  Dl_info info;
  if (dladdr(reinterpret_cast<void *>(ptr), &info) == 0)
    return 0;
  auto *ehdr = reinterpret_cast<const ElfW(Ehdr) *>(info.dli_fbase);
  auto *phdr_begin = reinterpret_cast<const ElfW(Phdr) *>(
      reinterpret_cast<const u8 *>(ehdr) + ehdr->e_phoff);

  // Get the load bias. This is normally the same as the dli_fbase address on
  // position-independent code, but can be different on non-PIE executables,
  // binaries using LLD's partitioning feature, or binaries compiled with a
  // linker script.
  ElfW(Addr) load_bias = 0;
  for (const auto &phdr :
       ArrayRef<const ElfW(Phdr)>(phdr_begin, phdr_begin + ehdr->e_phnum)) {
    if (phdr.p_type != PT_LOAD || phdr.p_offset != 0)
      continue;
    load_bias = reinterpret_cast<ElfW(Addr)>(ehdr) - phdr.p_vaddr;
    break;
  }

  // Walk all globals in this ELF object, looking for the one we're interested
  // in. Once we find it, we can stop iterating and return the size of the
  // global we're interested in.
  for (const hwasan_global &global :
       HwasanGlobalsFor(load_bias, phdr_begin, ehdr->e_phnum))
    if (global.addr() <= ptr && ptr < global.addr() + global.size())
      return global.size();

  return 0;
}

static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
                                      tag_t *left, tag_t *right) {
  Decorator d;
  uptr mem = ShadowToMem(reinterpret_cast<uptr>(candidate));
  HwasanChunkView chunk = FindHeapChunkByAddress(mem);
  if (chunk.IsAllocated()) {
    uptr offset;
    const char *whence;
    if (untagged_addr < chunk.End() && untagged_addr >= chunk.Beg()) {
      offset = untagged_addr - chunk.Beg();
      whence = "inside";
    } else if (candidate == left) {
      offset = untagged_addr - chunk.End();
      whence = "after";
    } else {
      offset = chunk.Beg() - untagged_addr;
      whence = "before";
    }
    Printf("%s", d.Error());
    Printf("\nCause: heap-buffer-overflow\n");
    Printf("%s", d.Default());
    Printf("%s", d.Location());
    Printf("%p is located %zd bytes %s a %zd-byte region [%p,%p)\n",
           untagged_addr, offset, whence, chunk.UsedSize(), chunk.Beg(),
           chunk.End());
    Printf("%s", d.Allocation());
    Printf("allocated by thread T%u here:\n", chunk.GetAllocThreadId());
    Printf("%s", d.Default());
    GetStackTraceFromId(chunk.GetAllocStackId()).Print();
    return;
  }
  // Check whether the address points into a loaded library. If so, this is
  // most likely a global variable.
  const char *module_name;
  uptr module_address;
  Symbolizer *sym = Symbolizer::GetOrInit();
  if (sym->GetModuleNameAndOffsetForPC(mem, &module_name, &module_address)) {
    Printf("%s", d.Error());
    Printf("\nCause: global-overflow\n");
    Printf("%s", d.Default());
    DataInfo info;
    Printf("%s", d.Location());
    if (sym->SymbolizeData(mem, &info) && info.start) {
"after" : "before", info.size, info.name, 359 info.start, info.start + info.size, module_name); 360 } else { 361 uptr size = GetGlobalSizeFromDescriptor(mem); 362 if (size == 0) 363 // We couldn't find the size of the global from the descriptors. 364 Printf( 365 "%p is located %s a global variable in " 366 "\n #0 0x%x (%s+0x%x)\n", 367 untagged_addr, candidate == left ? "after" : "before", mem, 368 module_name, module_address); 369 else 370 Printf( 371 "%p is located %s a %zd-byte global variable in " 372 "\n #0 0x%x (%s+0x%x)\n", 373 untagged_addr, candidate == left ? "after" : "before", size, mem, 374 module_name, module_address); 375 } 376 Printf("%s", d.Default()); 377 } 378 } 379 380 void PrintAddressDescription( 381 uptr tagged_addr, uptr access_size, 382 StackAllocationsRingBuffer *current_stack_allocations) { 383 Decorator d; 384 int num_descriptions_printed = 0; 385 uptr untagged_addr = UntagAddr(tagged_addr); 386 387 if (MemIsShadow(untagged_addr)) { 388 Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), untagged_addr, 389 d.Default()); 390 return; 391 } 392 393 // Print some very basic information about the address, if it's a heap. 394 HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr); 395 if (uptr beg = chunk.Beg()) { 396 uptr size = chunk.ActualSize(); 397 Printf("%s[%p,%p) is a %s %s heap chunk; " 398 "size: %zd offset: %zd\n%s", 399 d.Location(), 400 beg, beg + size, 401 chunk.FromSmallHeap() ? "small" : "large", 402 chunk.IsAllocated() ? "allocated" : "unallocated", 403 size, untagged_addr - beg, 404 d.Default()); 405 } 406 407 tag_t addr_tag = GetTagFromPointer(tagged_addr); 408 409 bool on_stack = false; 410 // Check stack first. If the address is on the stack of a live thread, we 411 // know it cannot be a heap / global overflow. 412 hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { 413 if (t->AddrIsInStack(untagged_addr)) { 414 on_stack = true; 415 // TODO(fmayer): figure out how to distinguish use-after-return and 416 // stack-buffer-overflow. 417 Printf("%s", d.Error()); 418 Printf("\nCause: stack tag-mismatch\n"); 419 Printf("%s", d.Location()); 420 Printf("Address %p is located in stack of thread T%zd\n", untagged_addr, 421 t->unique_id()); 422 Printf("%s", d.Default()); 423 t->Announce(); 424 425 auto *sa = (t == GetCurrentThread() && current_stack_allocations) 426 ? current_stack_allocations 427 : t->stack_allocations(); 428 PrintStackAllocations(sa, addr_tag, untagged_addr); 429 num_descriptions_printed++; 430 } 431 }); 432 433 // Check if this looks like a heap buffer overflow by scanning 434 // the shadow left and right and looking for the first adjacent 435 // object with a different memory tag. If that tag matches addr_tag, 436 // check the allocator if it has a live chunk there. 
  tag_t *tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
  tag_t *candidate = nullptr, *left = tag_ptr, *right = tag_ptr;
  uptr candidate_distance = 0;
  for (; candidate_distance < 1000; candidate_distance++) {
    if (MemIsShadow(reinterpret_cast<uptr>(left)) &&
        TagsEqual(addr_tag, left)) {
      candidate = left;
      break;
    }
    --left;
    if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
        TagsEqual(addr_tag, right)) {
      candidate = right;
      break;
    }
    ++right;
  }

  constexpr auto kCloseCandidateDistance = 1;

  if (!on_stack && candidate && candidate_distance <= kCloseCandidateDistance) {
    ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
    num_descriptions_printed++;
  }

  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
    // Scan all threads' ring buffers to find if it's a heap-use-after-free.
    HeapAllocationRecord har;
    uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
    if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
                           &ring_index, &num_matching_addrs,
                           &num_matching_addrs_4b)) {
      Printf("%s", d.Error());
      Printf("\nCause: use-after-free\n");
      Printf("%s", d.Location());
      Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n",
             untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
             har.requested_size, UntagAddr(har.tagged_addr),
             UntagAddr(har.tagged_addr) + har.requested_size);
      Printf("%s", d.Allocation());
      Printf("freed by thread T%u here:\n", t->unique_id());
      Printf("%s", d.Default());
      GetStackTraceFromId(har.free_context_id).Print();

      Printf("%s", d.Allocation());
      Printf("previously allocated by thread T%u here:\n", har.alloc_thread_id);
      Printf("%s", d.Default());
      GetStackTraceFromId(har.alloc_context_id).Print();

      // Print a developer note: the index of this heap object
      // in the thread's deallocation ring buffer.
      Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ring_index + 1,
             flags()->heap_history_size);
      Printf("hwasan_dev_note_num_matching_addrs: %zd\n", num_matching_addrs);
      Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
             num_matching_addrs_4b);

      t->Announce();
      num_descriptions_printed++;
    }
  });

  if (candidate && num_descriptions_printed == 0) {
    ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
    num_descriptions_printed++;
  }

  // Print the remaining threads, as extra information, one line per thread.
  if (flags()->print_live_threads_info)
    hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });

  if (!num_descriptions_printed)
    // We exhausted our possibilities. Bail out.
    Printf("HWAddressSanitizer can not describe address in more detail.\n");
  if (num_descriptions_printed > 1) {
    Printf(
        "There are %d potential causes, printed above in order "
        "of likeliness.\n",
        num_descriptions_printed);
  }
}

void ReportStats() {}
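// PrintTagInfoAroundAddr below renders rows of 16 shadow cells around the
// faulting granule, roughly like this (illustrative values):
//
//     0x004f00aa1d00: a4 a4 a4 a4 08 cc cc cc cc cc cc cc cc cc cc cc
//   =>0x004f00aa1e00: cc cc[39]cc cc cc cc cc cc cc cc cc cc cc cc cc
//     0x004f00aa1f00: 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03
//
// "=>" marks the row containing tag_ptr, and [..] brackets the tag of the
// faulting granule itself.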
static void PrintTagInfoAroundAddr(tag_t *tag_ptr, uptr num_rows,
                                   void (*print_tag)(InternalScopedString &s,
                                                     tag_t *tag)) {
  const uptr row_len = 16;  // Better be a power of two.
  tag_t *center_row_beg = reinterpret_cast<tag_t *>(
      RoundDownTo(reinterpret_cast<uptr>(tag_ptr), row_len));
  tag_t *beg_row = center_row_beg - row_len * (num_rows / 2);
  tag_t *end_row = center_row_beg + row_len * ((num_rows + 1) / 2);
  InternalScopedString s;
  for (tag_t *row = beg_row; row < end_row; row += row_len) {
    s.append("%s", row == center_row_beg ? "=>" : "  ");
    s.append("%p:", (void *)ShadowToMem(reinterpret_cast<uptr>(row)));
    for (uptr i = 0; i < row_len; i++) {
      s.append("%s", row + i == tag_ptr ? "[" : " ");
      print_tag(s, &row[i]);
      s.append("%s", row + i == tag_ptr ? "]" : " ");
    }
    s.append("\n");
  }
  Printf("%s", s.data());
}

static void PrintTagsAroundAddr(tag_t *tag_ptr) {
  Printf(
      "Memory tags around the buggy address (one tag corresponds to %zd "
      "bytes):\n",
      kShadowAlignment);
  PrintTagInfoAroundAddr(tag_ptr, 17, [](InternalScopedString &s, tag_t *tag) {
    s.append("%02x", *tag);
  });

  Printf(
      "Tags for short granules around the buggy address (one tag corresponds "
      "to %zd bytes):\n",
      kShadowAlignment);
  PrintTagInfoAroundAddr(tag_ptr, 3, [](InternalScopedString &s, tag_t *tag) {
    if (*tag >= 1 && *tag <= kShadowAlignment) {
      uptr granule_addr = ShadowToMem(reinterpret_cast<uptr>(tag));
      s.append("%02x",
               *reinterpret_cast<u8 *>(granule_addr + kShadowAlignment - 1));
    } else {
      s.append("..");
    }
  });
  Printf(
      "See "
      "https://clang.llvm.org/docs/"
      "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
      "description of short granule tags\n");
}

uptr GetTopPc(StackTrace *stack) {
  return stack->size ? StackTrace::GetPreviousInstructionPc(stack->trace[0])
                     : 0;
}

void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
  ScopedReport R(flags()->halt_on_error);

  uptr untagged_addr = UntagAddr(tagged_addr);
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  tag_t *tag_ptr = nullptr;
  tag_t mem_tag = 0;
  if (MemIsApp(untagged_addr)) {
    tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
    if (MemIsShadow(reinterpret_cast<uptr>(tag_ptr)))
      mem_tag = *tag_ptr;
    else
      tag_ptr = nullptr;
  }
  Decorator d;
  Printf("%s", d.Error());
  uptr pc = GetTopPc(stack);
  const char *bug_type = "invalid-free";
  const Thread *thread = GetCurrentThread();
  if (thread) {
    Report("ERROR: %s: %s on address %p at pc %p on thread T%zd\n",
           SanitizerToolName, bug_type, untagged_addr, pc, thread->unique_id());
  } else {
    Report("ERROR: %s: %s on address %p at pc %p on unknown thread\n",
           SanitizerToolName, bug_type, untagged_addr, pc);
  }
  Printf("%s", d.Access());
  if (tag_ptr)
    Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag, mem_tag);
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription(tagged_addr, 0, nullptr);

  if (tag_ptr)
    PrintTagsAroundAddr(tag_ptr);

  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}
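// A note on the mechanism behind ReportTailOverwritten below (a sketch of
// the allocator behavior; see the free_checks_tail_magic flag): for sizes
// that are not a multiple of 16, the allocator fills the unused tail of the
// last granule with a magic pattern at allocation time. When the chunk is
// freed, a tail that no longer matches `expected` means something wrote past
// the requested size.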
void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
                           const u8 *expected) {
  uptr tail_size = kShadowAlignment - (orig_size % kShadowAlignment);
  u8 actual_expected[kShadowAlignment];
  internal_memcpy(actual_expected, expected, tail_size);
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  // For a short granule, the pointer tag is stashed in the last byte of the
  // granule, overwriting the magic there. To avoid confusion, make the
  // expected magic string contain the short granule tag as well.
  if (orig_size % kShadowAlignment != 0) {
    actual_expected[tail_size - 1] = ptr_tag;
  }

  ScopedReport R(flags()->halt_on_error);
  Decorator d;
  uptr untagged_addr = UntagAddr(tagged_addr);
  Printf("%s", d.Error());
  const char *bug_type = "allocation-tail-overwritten";
  Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
         bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
  Printf("\n%s", d.Default());
  Printf(
      "Stack of invalid access unknown. Issue detected at deallocation "
      "time.\n");
  Printf("%s", d.Allocation());
  Printf("deallocated here:\n");
  Printf("%s", d.Default());
  stack->Print();
  HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
  if (chunk.Beg()) {
    Printf("%s", d.Allocation());
    Printf("allocated here:\n");
    Printf("%s", d.Default());
    GetStackTraceFromId(chunk.GetAllocStackId()).Print();
  }

  InternalScopedString s;
  CHECK_GT(tail_size, 0U);
  CHECK_LT(tail_size, kShadowAlignment);
  u8 *tail = reinterpret_cast<u8 *>(untagged_addr + orig_size);
  s.append("Tail contains: ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append(".. ");
  for (uptr i = 0; i < tail_size; i++)
    s.append("%02x ", tail[i]);
  s.append("\n");
  s.append("Expected:      ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append(".. ");
  for (uptr i = 0; i < tail_size; i++)
    s.append("%02x ", actual_expected[i]);
  s.append("\n");
  s.append("               ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append("   ");
  for (uptr i = 0; i < tail_size; i++)
    s.append("%s ", actual_expected[i] != tail[i] ? "^^" : "  ");

  s.append(
      "\nThis error occurs when a buffer overflow overwrites memory\n"
      "after a heap object, but within the %zd-byte granule, e.g.\n"
      "   char *x = new char[20];\n"
      "   x[25] = 42;\n"
      "%s does not detect such bugs in uninstrumented code at the time of "
      "write,\nbut can detect them at the time of free/delete.\n"
      "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
      kShadowAlignment, SanitizerToolName);
  Printf("%s", s.data());
  GetCurrentThread()->Announce();

  tag_t *tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
  PrintTagsAroundAddr(tag_ptr);

  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}
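// A note on the offset computation in ReportTagMismatch below: the faulting
// instruction only supplies the base address of the access, so the whole
// [tagged_addr, tagged_addr + access_size) range is re-checked with
// __hwasan_test_shadow, which returns the offset of the first byte whose tag
// does not match the pointer tag (or -1 if the entire range matches, which
// cannot happen here - hence the CHECKs).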
void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
                       bool is_store, bool fatal, uptr *registers_frame) {
  ScopedReport R(fatal);
  SavedStackAllocations current_stack_allocations(
      GetCurrentThread()->stack_allocations());

  Decorator d;
  uptr untagged_addr = UntagAddr(tagged_addr);
  // TODO: when possible, try to print heap-use-after-free, etc.
  const char *bug_type = "tag-mismatch";
  uptr pc = GetTopPc(stack);
  Printf("%s", d.Error());
  Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
         untagged_addr, pc);

  Thread *t = GetCurrentThread();

  sptr offset =
      __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
  CHECK_GE(offset, 0);
  CHECK_LT(offset, static_cast<sptr>(access_size));
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  tag_t *tag_ptr =
      reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
  tag_t mem_tag = *tag_ptr;

  Printf("%s", d.Access());
  if (mem_tag && mem_tag < kShadowAlignment) {
    tag_t *granule_ptr = reinterpret_cast<tag_t *>((untagged_addr + offset) &
                                                   ~(kShadowAlignment - 1));
    // Even if offset is 0, (untagged_addr + offset) may not be aligned to
    // granules; in_granule_offset is the offset of the leftmost accessed byte
    // within the bad granule.
    u8 in_granule_offset = (untagged_addr + offset) & (kShadowAlignment - 1);
    tag_t short_tag = granule_ptr[kShadowAlignment - 1];
    // The first mismatch was a short granule that matched the ptr_tag.
    if (short_tag == ptr_tag) {
      // If the access starts after the end of the short granule, then the
      // first bad byte is the first byte of the access; otherwise it is the
      // first byte past the end of the short granule.
      if (mem_tag > in_granule_offset) {
        offset += mem_tag - in_granule_offset;
      }
    }
    Printf(
        "%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n",
        is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
        mem_tag, short_tag, t->unique_id());
  } else {
    Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
           is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
           mem_tag, t->unique_id());
  }
  if (offset != 0)
    Printf("Invalid access starting at offset %zu\n", offset);
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription(tagged_addr, access_size,
                          current_stack_allocations.get());
  t->Announce();

  PrintTagsAroundAddr(tag_ptr);

  if (registers_frame)
    ReportRegisters(registers_frame, pc);

  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}

// See the frame breakdown defined in __hwasan_tag_mismatch (from
// hwasan_tag_mismatch_{aarch64,riscv64}.S).
void ReportRegisters(uptr *frame, uptr pc) {
  Printf("Registers where the failure occurred (pc %p):\n", pc);

  // We explicitly print a single line (4 registers/line) each iteration to
  // reduce the amount of logcat error messages printed. Each Printf() will
  // result in a new logcat line, irrespective of whether a newline is present,
  // and so we wish to reduce the number of Printf() calls we have to make.
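  // The layout below mirrors the save area that __hwasan_tag_mismatch sets
  // up: frame[i] holds register x<i> at the time of the check, and the
  // original sp is reconstructed as frame + 256 (see the comment before the
  // sp printout below).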
#if defined(__aarch64__)
  Printf("    x0  %016llx  x1  %016llx  x2  %016llx  x3  %016llx\n",
         frame[0], frame[1], frame[2], frame[3]);
#elif SANITIZER_RISCV64
  Printf("    sp  %016llx  x1  %016llx  x2  %016llx  x3  %016llx\n",
         reinterpret_cast<u8 *>(frame) + 256, frame[1], frame[2], frame[3]);
#endif
  Printf("    x4  %016llx  x5  %016llx  x6  %016llx  x7  %016llx\n",
         frame[4], frame[5], frame[6], frame[7]);
  Printf("    x8  %016llx  x9  %016llx  x10 %016llx  x11 %016llx\n",
         frame[8], frame[9], frame[10], frame[11]);
  Printf("    x12 %016llx  x13 %016llx  x14 %016llx  x15 %016llx\n",
         frame[12], frame[13], frame[14], frame[15]);
  Printf("    x16 %016llx  x17 %016llx  x18 %016llx  x19 %016llx\n",
         frame[16], frame[17], frame[18], frame[19]);
  Printf("    x20 %016llx  x21 %016llx  x22 %016llx  x23 %016llx\n",
         frame[20], frame[21], frame[22], frame[23]);
  Printf("    x24 %016llx  x25 %016llx  x26 %016llx  x27 %016llx\n",
         frame[24], frame[25], frame[26], frame[27]);
  // hwasan_check* reduces the stack pointer by 256, then __hwasan_tag_mismatch
  // passes it to this function.
#if defined(__aarch64__)
  Printf("    x28 %016llx  x29 %016llx  x30 %016llx   sp %016llx\n", frame[28],
         frame[29], frame[30], reinterpret_cast<u8 *>(frame) + 256);
#elif SANITIZER_RISCV64
  Printf("    x28 %016llx  x29 %016llx  x30 %016llx  x31 %016llx\n", frame[28],
         frame[29], frame[30], frame[31]);
#endif
}

}  // namespace __hwasan

void __hwasan_set_error_report_callback(void (*callback)(const char *)) {
  __hwasan::ScopedReport::SetErrorReportCallback(callback);
}
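// Usage sketch for the hook above (illustrative application code; the
// callback name and log sink are hypothetical, not part of hwasan):
//
//   static void SaveHwasanReport(const char *report) {
//     // Receives the full report text accumulated by ScopedReport before
//     // the process dies (for fatal errors).
//     write(crash_log_fd, report, strlen(report));
//   }
//   ...
//   __hwasan_set_error_report_callback(SaveHwasanReport);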