1 //===-- hwasan_report.cpp -------------------------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file is a part of HWAddressSanitizer. 10 // 11 // Error reporting. 12 //===----------------------------------------------------------------------===// 13 14 #include "hwasan_report.h" 15 16 #include <dlfcn.h> 17 18 #include "hwasan.h" 19 #include "hwasan_allocator.h" 20 #include "hwasan_globals.h" 21 #include "hwasan_mapping.h" 22 #include "hwasan_thread.h" 23 #include "hwasan_thread_list.h" 24 #include "sanitizer_common/sanitizer_allocator_internal.h" 25 #include "sanitizer_common/sanitizer_common.h" 26 #include "sanitizer_common/sanitizer_flags.h" 27 #include "sanitizer_common/sanitizer_mutex.h" 28 #include "sanitizer_common/sanitizer_report_decorator.h" 29 #include "sanitizer_common/sanitizer_stackdepot.h" 30 #include "sanitizer_common/sanitizer_stacktrace_printer.h" 31 #include "sanitizer_common/sanitizer_symbolizer.h" 32 33 using namespace __sanitizer; 34 35 namespace __hwasan { 36 37 class ScopedReport { 38 public: 39 ScopedReport(bool fatal = false) : error_message_(1), fatal(fatal) { 40 Lock lock(&error_message_lock_); 41 error_message_ptr_ = fatal ? &error_message_ : nullptr; 42 ++hwasan_report_count; 43 } 44 45 ~ScopedReport() { 46 void (*report_cb)(const char *); 47 { 48 Lock lock(&error_message_lock_); 49 report_cb = error_report_callback_; 50 error_message_ptr_ = nullptr; 51 } 52 if (report_cb) 53 report_cb(error_message_.data()); 54 if (fatal) 55 SetAbortMessage(error_message_.data()); 56 if (common_flags()->print_module_map >= 2 || 57 (fatal && common_flags()->print_module_map)) 58 DumpProcessMap(); 59 if (fatal) 60 Die(); 61 } 62 63 static void MaybeAppendToErrorMessage(const char *msg) { 64 Lock lock(&error_message_lock_); 65 if (!error_message_ptr_) 66 return; 67 uptr len = internal_strlen(msg); 68 uptr old_size = error_message_ptr_->size(); 69 error_message_ptr_->resize(old_size + len); 70 // overwrite old trailing '\0', keep new trailing '\0' untouched. 71 internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len); 72 } 73 74 static void SetErrorReportCallback(void (*callback)(const char *)) { 75 Lock lock(&error_message_lock_); 76 error_report_callback_ = callback; 77 } 78 79 private: 80 ScopedErrorReportLock error_report_lock_; 81 InternalMmapVector<char> error_message_; 82 bool fatal; 83 84 static InternalMmapVector<char> *error_message_ptr_; 85 static Mutex error_message_lock_; 86 static void (*error_report_callback_)(const char *); 87 }; 88 89 InternalMmapVector<char> *ScopedReport::error_message_ptr_; 90 Mutex ScopedReport::error_message_lock_; 91 void (*ScopedReport::error_report_callback_)(const char *); 92 93 // If there is an active ScopedReport, append to its error message. 94 void AppendToErrorMessageBuffer(const char *buffer) { 95 ScopedReport::MaybeAppendToErrorMessage(buffer); 96 } 97 98 static StackTrace GetStackTraceFromId(u32 id) { 99 CHECK(id); 100 StackTrace res = StackDepotGet(id); 101 CHECK(res.trace); 102 return res; 103 } 104 105 static void MaybePrintAndroidHelpUrl() { 106 #if SANITIZER_ANDROID 107 Printf( 108 "Learn more about HWASan reports: " 109 "https://source.android.com/docs/security/test/memory-safety/" 110 "hwasan-reports\n"); 111 #endif 112 } 113 114 // A RAII object that holds a copy of the current thread stack ring buffer. 115 // The actual stack buffer may change while we are iterating over it (for 116 // example, Printf may call syslog() which can itself be built with hwasan). 117 class SavedStackAllocations { 118 public: 119 SavedStackAllocations(StackAllocationsRingBuffer *rb) { 120 uptr size = rb->size() * sizeof(uptr); 121 void *storage = 122 MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations"); 123 new (&rb_) StackAllocationsRingBuffer(*rb, storage); 124 } 125 126 ~SavedStackAllocations() { 127 StackAllocationsRingBuffer *rb = get(); 128 UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr)); 129 } 130 131 StackAllocationsRingBuffer *get() { 132 return (StackAllocationsRingBuffer *)&rb_; 133 } 134 135 private: 136 uptr rb_; 137 }; 138 139 class Decorator: public __sanitizer::SanitizerCommonDecorator { 140 public: 141 Decorator() : SanitizerCommonDecorator() { } 142 const char *Access() { return Blue(); } 143 const char *Allocation() const { return Magenta(); } 144 const char *Origin() const { return Magenta(); } 145 const char *Name() const { return Green(); } 146 const char *Location() { return Green(); } 147 const char *Thread() { return Green(); } 148 }; 149 150 static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr, 151 HeapAllocationRecord *har, uptr *ring_index, 152 uptr *num_matching_addrs, 153 uptr *num_matching_addrs_4b) { 154 if (!rb) return false; 155 156 *num_matching_addrs = 0; 157 *num_matching_addrs_4b = 0; 158 for (uptr i = 0, size = rb->size(); i < size; i++) { 159 auto h = (*rb)[i]; 160 if (h.tagged_addr <= tagged_addr && 161 h.tagged_addr + h.requested_size > tagged_addr) { 162 *har = h; 163 *ring_index = i; 164 return true; 165 } 166 167 // Measure the number of heap ring buffer entries that would have matched 168 // if we had only one entry per address (e.g. if the ring buffer data was 169 // stored at the address itself). This will help us tune the allocator 170 // implementation for MTE. 171 if (UntagAddr(h.tagged_addr) <= UntagAddr(tagged_addr) && 172 UntagAddr(h.tagged_addr) + h.requested_size > UntagAddr(tagged_addr)) { 173 ++*num_matching_addrs; 174 } 175 176 // Measure the number of heap ring buffer entries that would have matched 177 // if we only had 4 tag bits, which is the case for MTE. 178 auto untag_4b = [](uptr p) { 179 return p & ((1ULL << 60) - 1); 180 }; 181 if (untag_4b(h.tagged_addr) <= untag_4b(tagged_addr) && 182 untag_4b(h.tagged_addr) + h.requested_size > untag_4b(tagged_addr)) { 183 ++*num_matching_addrs_4b; 184 } 185 } 186 return false; 187 } 188 189 static void PrintStackAllocations(StackAllocationsRingBuffer *sa, 190 tag_t addr_tag, uptr untagged_addr) { 191 uptr frames = Min((uptr)flags()->stack_history_size, sa->size()); 192 bool found_local = false; 193 for (uptr i = 0; i < frames; i++) { 194 const uptr *record_addr = &(*sa)[i]; 195 uptr record = *record_addr; 196 if (!record) 197 break; 198 tag_t base_tag = 199 reinterpret_cast<uptr>(record_addr) >> kRecordAddrBaseTagShift; 200 uptr fp = (record >> kRecordFPShift) << kRecordFPLShift; 201 uptr pc_mask = (1ULL << kRecordFPShift) - 1; 202 uptr pc = record & pc_mask; 203 FrameInfo frame; 204 if (Symbolizer::GetOrInit()->SymbolizeFrame(pc, &frame)) { 205 for (LocalInfo &local : frame.locals) { 206 if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset) 207 continue; 208 tag_t obj_tag = base_tag ^ local.tag_offset; 209 if (obj_tag != addr_tag) 210 continue; 211 // Calculate the offset from the object address to the faulting 212 // address. Because we only store bits 4-19 of FP (bits 0-3 are 213 // guaranteed to be zero), the calculation is performed mod 2^20 and may 214 // harmlessly underflow if the address mod 2^20 is below the object 215 // address. 216 uptr obj_offset = 217 (untagged_addr - fp - local.frame_offset) & (kRecordFPModulus - 1); 218 if (obj_offset >= local.size) 219 continue; 220 if (!found_local) { 221 Printf("Potentially referenced stack objects:\n"); 222 found_local = true; 223 } 224 Printf(" %s in %s %s:%d\n", local.name, local.function_name, 225 local.decl_file, local.decl_line); 226 } 227 frame.Clear(); 228 } 229 } 230 231 if (found_local) 232 return; 233 234 // We didn't find any locals. Most likely we don't have symbols, so dump 235 // the information that we have for offline analysis. 236 InternalScopedString frame_desc; 237 Printf("Previously allocated frames:\n"); 238 for (uptr i = 0; i < frames; i++) { 239 const uptr *record_addr = &(*sa)[i]; 240 uptr record = *record_addr; 241 if (!record) 242 break; 243 uptr pc_mask = (1ULL << 48) - 1; 244 uptr pc = record & pc_mask; 245 frame_desc.append(" record_addr:0x%zx record:0x%zx", 246 reinterpret_cast<uptr>(record_addr), record); 247 if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) { 248 RenderFrame(&frame_desc, " %F %L", 0, frame->info.address, &frame->info, 249 common_flags()->symbolize_vs_style, 250 common_flags()->strip_path_prefix); 251 frame->ClearAll(); 252 } 253 Printf("%s\n", frame_desc.data()); 254 frame_desc.clear(); 255 } 256 } 257 258 // Returns true if tag == *tag_ptr, reading tags from short granules if 259 // necessary. This may return a false positive if tags 1-15 are used as a 260 // regular tag rather than a short granule marker. 261 static bool TagsEqual(tag_t tag, tag_t *tag_ptr) { 262 if (tag == *tag_ptr) 263 return true; 264 if (*tag_ptr == 0 || *tag_ptr > kShadowAlignment - 1) 265 return false; 266 uptr mem = ShadowToMem(reinterpret_cast<uptr>(tag_ptr)); 267 tag_t inline_tag = *reinterpret_cast<tag_t *>(mem + kShadowAlignment - 1); 268 return tag == inline_tag; 269 } 270 271 // HWASan globals store the size of the global in the descriptor. In cases where 272 // we don't have a binary with symbols, we can't grab the size of the global 273 // from the debug info - but we might be able to retrieve it from the 274 // descriptor. Returns zero if the lookup failed. 275 static uptr GetGlobalSizeFromDescriptor(uptr ptr) { 276 // Find the ELF object that this global resides in. 277 Dl_info info; 278 if (dladdr(reinterpret_cast<void *>(ptr), &info) == 0) 279 return 0; 280 auto *ehdr = reinterpret_cast<const ElfW(Ehdr) *>(info.dli_fbase); 281 auto *phdr_begin = reinterpret_cast<const ElfW(Phdr) *>( 282 reinterpret_cast<const u8 *>(ehdr) + ehdr->e_phoff); 283 284 // Get the load bias. This is normally the same as the dli_fbase address on 285 // position-independent code, but can be different on non-PIE executables, 286 // binaries using LLD's partitioning feature, or binaries compiled with a 287 // linker script. 288 ElfW(Addr) load_bias = 0; 289 for (const auto &phdr : 290 ArrayRef<const ElfW(Phdr)>(phdr_begin, phdr_begin + ehdr->e_phnum)) { 291 if (phdr.p_type != PT_LOAD || phdr.p_offset != 0) 292 continue; 293 load_bias = reinterpret_cast<ElfW(Addr)>(ehdr) - phdr.p_vaddr; 294 break; 295 } 296 297 // Walk all globals in this ELF object, looking for the one we're interested 298 // in. Once we find it, we can stop iterating and return the size of the 299 // global we're interested in. 300 for (const hwasan_global &global : 301 HwasanGlobalsFor(load_bias, phdr_begin, ehdr->e_phnum)) 302 if (global.addr() <= ptr && ptr < global.addr() + global.size()) 303 return global.size(); 304 305 return 0; 306 } 307 308 static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate, 309 tag_t *left, tag_t *right) { 310 Decorator d; 311 uptr mem = ShadowToMem(reinterpret_cast<uptr>(candidate)); 312 HwasanChunkView chunk = FindHeapChunkByAddress(mem); 313 if (chunk.IsAllocated()) { 314 uptr offset; 315 const char *whence; 316 if (untagged_addr < chunk.End() && untagged_addr >= chunk.Beg()) { 317 offset = untagged_addr - chunk.Beg(); 318 whence = "inside"; 319 } else if (candidate == left) { 320 offset = untagged_addr - chunk.End(); 321 whence = "after"; 322 } else { 323 offset = chunk.Beg() - untagged_addr; 324 whence = "before"; 325 } 326 Printf("%s", d.Error()); 327 Printf("\nCause: heap-buffer-overflow\n"); 328 Printf("%s", d.Default()); 329 Printf("%s", d.Location()); 330 Printf("%p is located %zd bytes %s a %zd-byte region [%p,%p)\n", 331 untagged_addr, offset, whence, chunk.UsedSize(), chunk.Beg(), 332 chunk.End()); 333 Printf("%s", d.Allocation()); 334 Printf("allocated here:\n"); 335 Printf("%s", d.Default()); 336 GetStackTraceFromId(chunk.GetAllocStackId()).Print(); 337 return; 338 } 339 // Check whether the address points into a loaded library. If so, this is 340 // most likely a global variable. 341 const char *module_name; 342 uptr module_address; 343 Symbolizer *sym = Symbolizer::GetOrInit(); 344 if (sym->GetModuleNameAndOffsetForPC(mem, &module_name, &module_address)) { 345 Printf("%s", d.Error()); 346 Printf("\nCause: global-overflow\n"); 347 Printf("%s", d.Default()); 348 DataInfo info; 349 Printf("%s", d.Location()); 350 if (sym->SymbolizeData(mem, &info) && info.start) { 351 Printf( 352 "%p is located %zd bytes %s a %zd-byte global variable " 353 "%s [%p,%p) in %s\n", 354 untagged_addr, 355 candidate == left ? untagged_addr - (info.start + info.size) 356 : info.start - untagged_addr, 357 candidate == left ? "after" : "before", info.size, info.name, 358 info.start, info.start + info.size, module_name); 359 } else { 360 uptr size = GetGlobalSizeFromDescriptor(mem); 361 if (size == 0) 362 // We couldn't find the size of the global from the descriptors. 363 Printf( 364 "%p is located %s a global variable in " 365 "\n #0 0x%x (%s+0x%x)\n", 366 untagged_addr, candidate == left ? "after" : "before", mem, 367 module_name, module_address); 368 else 369 Printf( 370 "%p is located %s a %zd-byte global variable in " 371 "\n #0 0x%x (%s+0x%x)\n", 372 untagged_addr, candidate == left ? "after" : "before", size, mem, 373 module_name, module_address); 374 } 375 Printf("%s", d.Default()); 376 } 377 } 378 379 void PrintAddressDescription( 380 uptr tagged_addr, uptr access_size, 381 StackAllocationsRingBuffer *current_stack_allocations) { 382 Decorator d; 383 int num_descriptions_printed = 0; 384 uptr untagged_addr = UntagAddr(tagged_addr); 385 386 if (MemIsShadow(untagged_addr)) { 387 Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), untagged_addr, 388 d.Default()); 389 return; 390 } 391 392 // Print some very basic information about the address, if it's a heap. 393 HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr); 394 if (uptr beg = chunk.Beg()) { 395 uptr size = chunk.ActualSize(); 396 Printf("%s[%p,%p) is a %s %s heap chunk; " 397 "size: %zd offset: %zd\n%s", 398 d.Location(), 399 beg, beg + size, 400 chunk.FromSmallHeap() ? "small" : "large", 401 chunk.IsAllocated() ? "allocated" : "unallocated", 402 size, untagged_addr - beg, 403 d.Default()); 404 } 405 406 tag_t addr_tag = GetTagFromPointer(tagged_addr); 407 408 bool on_stack = false; 409 // Check stack first. If the address is on the stack of a live thread, we 410 // know it cannot be a heap / global overflow. 411 hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { 412 if (t->AddrIsInStack(untagged_addr)) { 413 on_stack = true; 414 // TODO(fmayer): figure out how to distinguish use-after-return and 415 // stack-buffer-overflow. 416 Printf("%s", d.Error()); 417 Printf("\nCause: stack tag-mismatch\n"); 418 Printf("%s", d.Location()); 419 Printf("Address %p is located in stack of thread T%zd\n", untagged_addr, 420 t->unique_id()); 421 Printf("%s", d.Default()); 422 t->Announce(); 423 424 auto *sa = (t == GetCurrentThread() && current_stack_allocations) 425 ? current_stack_allocations 426 : t->stack_allocations(); 427 PrintStackAllocations(sa, addr_tag, untagged_addr); 428 num_descriptions_printed++; 429 } 430 }); 431 432 // Check if this looks like a heap buffer overflow by scanning 433 // the shadow left and right and looking for the first adjacent 434 // object with a different memory tag. If that tag matches addr_tag, 435 // check the allocator if it has a live chunk there. 436 tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr)); 437 tag_t *candidate = nullptr, *left = tag_ptr, *right = tag_ptr; 438 uptr candidate_distance = 0; 439 for (; candidate_distance < 1000; candidate_distance++) { 440 if (MemIsShadow(reinterpret_cast<uptr>(left)) && 441 TagsEqual(addr_tag, left)) { 442 candidate = left; 443 break; 444 } 445 --left; 446 if (MemIsShadow(reinterpret_cast<uptr>(right)) && 447 TagsEqual(addr_tag, right)) { 448 candidate = right; 449 break; 450 } 451 ++right; 452 } 453 454 constexpr auto kCloseCandidateDistance = 1; 455 456 if (!on_stack && candidate && candidate_distance <= kCloseCandidateDistance) { 457 ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right); 458 num_descriptions_printed++; 459 } 460 461 hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { 462 // Scan all threads' ring buffers to find if it's a heap-use-after-free. 463 HeapAllocationRecord har; 464 uptr ring_index, num_matching_addrs, num_matching_addrs_4b; 465 if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har, 466 &ring_index, &num_matching_addrs, 467 &num_matching_addrs_4b)) { 468 Printf("%s", d.Error()); 469 Printf("\nCause: use-after-free\n"); 470 Printf("%s", d.Location()); 471 Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n", 472 untagged_addr, untagged_addr - UntagAddr(har.tagged_addr), 473 har.requested_size, UntagAddr(har.tagged_addr), 474 UntagAddr(har.tagged_addr) + har.requested_size); 475 Printf("%s", d.Allocation()); 476 Printf("freed by thread T%zd here:\n", t->unique_id()); 477 Printf("%s", d.Default()); 478 GetStackTraceFromId(har.free_context_id).Print(); 479 480 Printf("%s", d.Allocation()); 481 Printf("previously allocated here:\n", t); 482 Printf("%s", d.Default()); 483 GetStackTraceFromId(har.alloc_context_id).Print(); 484 485 // Print a developer note: the index of this heap object 486 // in the thread's deallocation ring buffer. 487 Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ring_index + 1, 488 flags()->heap_history_size); 489 Printf("hwasan_dev_note_num_matching_addrs: %zd\n", num_matching_addrs); 490 Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n", 491 num_matching_addrs_4b); 492 493 t->Announce(); 494 num_descriptions_printed++; 495 } 496 }); 497 498 if (candidate && num_descriptions_printed == 0) { 499 ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right); 500 num_descriptions_printed++; 501 } 502 503 // Print the remaining threads, as an extra information, 1 line per thread. 504 hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); }); 505 506 if (!num_descriptions_printed) 507 // We exhausted our possibilities. Bail out. 508 Printf("HWAddressSanitizer can not describe address in more detail.\n"); 509 if (num_descriptions_printed > 1) { 510 Printf( 511 "There are %d potential causes, printed above in order " 512 "of likeliness.\n", 513 num_descriptions_printed); 514 } 515 } 516 517 void ReportStats() {} 518 519 static void PrintTagInfoAroundAddr(tag_t *tag_ptr, uptr num_rows, 520 void (*print_tag)(InternalScopedString &s, 521 tag_t *tag)) { 522 const uptr row_len = 16; // better be power of two. 523 tag_t *center_row_beg = reinterpret_cast<tag_t *>( 524 RoundDownTo(reinterpret_cast<uptr>(tag_ptr), row_len)); 525 tag_t *beg_row = center_row_beg - row_len * (num_rows / 2); 526 tag_t *end_row = center_row_beg + row_len * ((num_rows + 1) / 2); 527 InternalScopedString s; 528 for (tag_t *row = beg_row; row < end_row; row += row_len) { 529 s.append("%s", row == center_row_beg ? "=>" : " "); 530 s.append("%p:", (void *)ShadowToMem(reinterpret_cast<uptr>(row))); 531 for (uptr i = 0; i < row_len; i++) { 532 s.append("%s", row + i == tag_ptr ? "[" : " "); 533 print_tag(s, &row[i]); 534 s.append("%s", row + i == tag_ptr ? "]" : " "); 535 } 536 s.append("\n"); 537 } 538 Printf("%s", s.data()); 539 } 540 541 static void PrintTagsAroundAddr(tag_t *tag_ptr) { 542 Printf( 543 "Memory tags around the buggy address (one tag corresponds to %zd " 544 "bytes):\n", kShadowAlignment); 545 PrintTagInfoAroundAddr(tag_ptr, 17, [](InternalScopedString &s, tag_t *tag) { 546 s.append("%02x", *tag); 547 }); 548 549 Printf( 550 "Tags for short granules around the buggy address (one tag corresponds " 551 "to %zd bytes):\n", 552 kShadowAlignment); 553 PrintTagInfoAroundAddr(tag_ptr, 3, [](InternalScopedString &s, tag_t *tag) { 554 if (*tag >= 1 && *tag <= kShadowAlignment) { 555 uptr granule_addr = ShadowToMem(reinterpret_cast<uptr>(tag)); 556 s.append("%02x", 557 *reinterpret_cast<u8 *>(granule_addr + kShadowAlignment - 1)); 558 } else { 559 s.append(".."); 560 } 561 }); 562 Printf( 563 "See " 564 "https://clang.llvm.org/docs/" 565 "HardwareAssistedAddressSanitizerDesign.html#short-granules for a " 566 "description of short granule tags\n"); 567 } 568 569 uptr GetTopPc(StackTrace *stack) { 570 return stack->size ? StackTrace::GetPreviousInstructionPc(stack->trace[0]) 571 : 0; 572 } 573 574 void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) { 575 ScopedReport R(flags()->halt_on_error); 576 577 uptr untagged_addr = UntagAddr(tagged_addr); 578 tag_t ptr_tag = GetTagFromPointer(tagged_addr); 579 tag_t *tag_ptr = nullptr; 580 tag_t mem_tag = 0; 581 if (MemIsApp(untagged_addr)) { 582 tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr)); 583 if (MemIsShadow(reinterpret_cast<uptr>(tag_ptr))) 584 mem_tag = *tag_ptr; 585 else 586 tag_ptr = nullptr; 587 } 588 Decorator d; 589 Printf("%s", d.Error()); 590 uptr pc = GetTopPc(stack); 591 const char *bug_type = "invalid-free"; 592 const Thread *thread = GetCurrentThread(); 593 if (thread) { 594 Report("ERROR: %s: %s on address %p at pc %p on thread T%zd\n", 595 SanitizerToolName, bug_type, untagged_addr, pc, thread->unique_id()); 596 } else { 597 Report("ERROR: %s: %s on address %p at pc %p on unknown thread\n", 598 SanitizerToolName, bug_type, untagged_addr, pc); 599 } 600 Printf("%s", d.Access()); 601 if (tag_ptr) 602 Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag, mem_tag); 603 Printf("%s", d.Default()); 604 605 stack->Print(); 606 607 PrintAddressDescription(tagged_addr, 0, nullptr); 608 609 if (tag_ptr) 610 PrintTagsAroundAddr(tag_ptr); 611 612 MaybePrintAndroidHelpUrl(); 613 ReportErrorSummary(bug_type, stack); 614 } 615 616 void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size, 617 const u8 *expected) { 618 uptr tail_size = kShadowAlignment - (orig_size % kShadowAlignment); 619 u8 actual_expected[kShadowAlignment]; 620 internal_memcpy(actual_expected, expected, tail_size); 621 tag_t ptr_tag = GetTagFromPointer(tagged_addr); 622 // Short granule is stashed in the last byte of the magic string. To avoid 623 // confusion, make the expected magic string contain the short granule tag. 624 if (orig_size % kShadowAlignment != 0) { 625 actual_expected[tail_size - 1] = ptr_tag; 626 } 627 628 ScopedReport R(flags()->halt_on_error); 629 Decorator d; 630 uptr untagged_addr = UntagAddr(tagged_addr); 631 Printf("%s", d.Error()); 632 const char *bug_type = "allocation-tail-overwritten"; 633 Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName, 634 bug_type, untagged_addr, untagged_addr + orig_size, orig_size); 635 Printf("\n%s", d.Default()); 636 Printf( 637 "Stack of invalid access unknown. Issue detected at deallocation " 638 "time.\n"); 639 Printf("%s", d.Allocation()); 640 Printf("deallocated here:\n"); 641 Printf("%s", d.Default()); 642 stack->Print(); 643 HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr); 644 if (chunk.Beg()) { 645 Printf("%s", d.Allocation()); 646 Printf("allocated here:\n"); 647 Printf("%s", d.Default()); 648 GetStackTraceFromId(chunk.GetAllocStackId()).Print(); 649 } 650 651 InternalScopedString s; 652 CHECK_GT(tail_size, 0U); 653 CHECK_LT(tail_size, kShadowAlignment); 654 u8 *tail = reinterpret_cast<u8*>(untagged_addr + orig_size); 655 s.append("Tail contains: "); 656 for (uptr i = 0; i < kShadowAlignment - tail_size; i++) 657 s.append(".. "); 658 for (uptr i = 0; i < tail_size; i++) 659 s.append("%02x ", tail[i]); 660 s.append("\n"); 661 s.append("Expected: "); 662 for (uptr i = 0; i < kShadowAlignment - tail_size; i++) 663 s.append(".. "); 664 for (uptr i = 0; i < tail_size; i++) s.append("%02x ", actual_expected[i]); 665 s.append("\n"); 666 s.append(" "); 667 for (uptr i = 0; i < kShadowAlignment - tail_size; i++) 668 s.append(" "); 669 for (uptr i = 0; i < tail_size; i++) 670 s.append("%s ", actual_expected[i] != tail[i] ? "^^" : " "); 671 672 s.append("\nThis error occurs when a buffer overflow overwrites memory\n" 673 "after a heap object, but within the %zd-byte granule, e.g.\n" 674 " char *x = new char[20];\n" 675 " x[25] = 42;\n" 676 "%s does not detect such bugs in uninstrumented code at the time of write," 677 "\nbut can detect them at the time of free/delete.\n" 678 "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n", 679 kShadowAlignment, SanitizerToolName); 680 Printf("%s", s.data()); 681 GetCurrentThread()->Announce(); 682 683 tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr)); 684 PrintTagsAroundAddr(tag_ptr); 685 686 MaybePrintAndroidHelpUrl(); 687 ReportErrorSummary(bug_type, stack); 688 } 689 690 void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size, 691 bool is_store, bool fatal, uptr *registers_frame) { 692 ScopedReport R(fatal); 693 SavedStackAllocations current_stack_allocations( 694 GetCurrentThread()->stack_allocations()); 695 696 Decorator d; 697 uptr untagged_addr = UntagAddr(tagged_addr); 698 // TODO: when possible, try to print heap-use-after-free, etc. 699 const char *bug_type = "tag-mismatch"; 700 uptr pc = GetTopPc(stack); 701 Printf("%s", d.Error()); 702 Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type, 703 untagged_addr, pc); 704 705 Thread *t = GetCurrentThread(); 706 707 sptr offset = 708 __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size); 709 CHECK(offset >= 0 && offset < static_cast<sptr>(access_size)); 710 tag_t ptr_tag = GetTagFromPointer(tagged_addr); 711 tag_t *tag_ptr = 712 reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset)); 713 tag_t mem_tag = *tag_ptr; 714 715 Printf("%s", d.Access()); 716 if (mem_tag && mem_tag < kShadowAlignment) { 717 tag_t *granule_ptr = reinterpret_cast<tag_t *>((untagged_addr + offset) & 718 ~(kShadowAlignment - 1)); 719 // If offset is 0, (untagged_addr + offset) is not aligned to granules. 720 // This is the offset of the leftmost accessed byte within the bad granule. 721 u8 in_granule_offset = (untagged_addr + offset) & (kShadowAlignment - 1); 722 tag_t short_tag = granule_ptr[kShadowAlignment - 1]; 723 // The first mismatch was a short granule that matched the ptr_tag. 724 if (short_tag == ptr_tag) { 725 // If the access starts after the end of the short granule, then the first 726 // bad byte is the first byte of the access; otherwise it is the first 727 // byte past the end of the short granule 728 if (mem_tag > in_granule_offset) { 729 offset += mem_tag - in_granule_offset; 730 } 731 } 732 Printf( 733 "%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n", 734 is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag, 735 mem_tag, short_tag, t->unique_id()); 736 } else { 737 Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n", 738 is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag, 739 mem_tag, t->unique_id()); 740 } 741 if (offset != 0) 742 Printf("Invalid access starting at offset %zu\n", offset); 743 Printf("%s", d.Default()); 744 745 stack->Print(); 746 747 PrintAddressDescription(tagged_addr, access_size, 748 current_stack_allocations.get()); 749 t->Announce(); 750 751 PrintTagsAroundAddr(tag_ptr); 752 753 if (registers_frame) 754 ReportRegisters(registers_frame, pc); 755 756 MaybePrintAndroidHelpUrl(); 757 ReportErrorSummary(bug_type, stack); 758 } 759 760 // See the frame breakdown defined in __hwasan_tag_mismatch (from 761 // hwasan_tag_mismatch_{aarch64,riscv64}.S). 762 void ReportRegisters(uptr *frame, uptr pc) { 763 Printf("Registers where the failure occurred (pc %p):\n", pc); 764 765 // We explicitly print a single line (4 registers/line) each iteration to 766 // reduce the amount of logcat error messages printed. Each Printf() will 767 // result in a new logcat line, irrespective of whether a newline is present, 768 // and so we wish to reduce the number of Printf() calls we have to make. 769 #if defined(__aarch64__) 770 Printf(" x0 %016llx x1 %016llx x2 %016llx x3 %016llx\n", 771 frame[0], frame[1], frame[2], frame[3]); 772 #elif SANITIZER_RISCV64 773 Printf(" sp %016llx x1 %016llx x2 %016llx x3 %016llx\n", 774 reinterpret_cast<u8 *>(frame) + 256, frame[1], frame[2], frame[3]); 775 #endif 776 Printf(" x4 %016llx x5 %016llx x6 %016llx x7 %016llx\n", 777 frame[4], frame[5], frame[6], frame[7]); 778 Printf(" x8 %016llx x9 %016llx x10 %016llx x11 %016llx\n", 779 frame[8], frame[9], frame[10], frame[11]); 780 Printf(" x12 %016llx x13 %016llx x14 %016llx x15 %016llx\n", 781 frame[12], frame[13], frame[14], frame[15]); 782 Printf(" x16 %016llx x17 %016llx x18 %016llx x19 %016llx\n", 783 frame[16], frame[17], frame[18], frame[19]); 784 Printf(" x20 %016llx x21 %016llx x22 %016llx x23 %016llx\n", 785 frame[20], frame[21], frame[22], frame[23]); 786 Printf(" x24 %016llx x25 %016llx x26 %016llx x27 %016llx\n", 787 frame[24], frame[25], frame[26], frame[27]); 788 // hwasan_check* reduces the stack pointer by 256, then __hwasan_tag_mismatch 789 // passes it to this function. 790 #if defined(__aarch64__) 791 Printf(" x28 %016llx x29 %016llx x30 %016llx sp %016llx\n", frame[28], 792 frame[29], frame[30], reinterpret_cast<u8 *>(frame) + 256); 793 #elif SANITIZER_RISCV64 794 Printf(" x28 %016llx x29 %016llx x30 %016llx x31 %016llx\n", frame[28], 795 frame[29], frame[30], frame[31]); 796 #else 797 #endif 798 } 799 800 } // namespace __hwasan 801 802 void __hwasan_set_error_report_callback(void (*callback)(const char *)) { 803 __hwasan::ScopedReport::SetErrorReportCallback(callback); 804 } 805