xref: /freebsd/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.cpp (revision cfd6422a5217410fbd66f7a7a8a64d9d85e61229)
1 //===-- hwasan_report.cpp -------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of HWAddressSanitizer.
10 //
11 // Error reporting.
12 //===----------------------------------------------------------------------===//
13 
14 #include "hwasan_report.h"
15 
16 #include <dlfcn.h>
17 
18 #include "hwasan.h"
19 #include "hwasan_allocator.h"
20 #include "hwasan_globals.h"
21 #include "hwasan_mapping.h"
22 #include "hwasan_thread.h"
23 #include "hwasan_thread_list.h"
24 #include "sanitizer_common/sanitizer_allocator_internal.h"
25 #include "sanitizer_common/sanitizer_common.h"
26 #include "sanitizer_common/sanitizer_flags.h"
27 #include "sanitizer_common/sanitizer_mutex.h"
28 #include "sanitizer_common/sanitizer_report_decorator.h"
29 #include "sanitizer_common/sanitizer_stackdepot.h"
30 #include "sanitizer_common/sanitizer_stacktrace_printer.h"
31 #include "sanitizer_common/sanitizer_symbolizer.h"
32 
33 using namespace __sanitizer;
34 
35 namespace __hwasan {
36 
// Assembles a single hwasan error report.  Holds ScopedErrorReportLock so
// reports from different threads do not interleave, and (for fatal reports)
// captures all text printed through AppendToErrorMessageBuffer() so the
// complete report can be attached to the abort message before dying.
class ScopedReport {
 public:
  // \p fatal: if true, the process Die()s when this object goes out of scope
  // and the accumulated report text is published via SetAbortMessage().
  // error_message_ starts with size 1 so it always contains a '\0'
  // terminator; MaybeAppendToErrorMessage() relies on old_size >= 1.
  ScopedReport(bool fatal = false) : error_message_(1), fatal(fatal) {
    BlockingMutexLock lock(&error_message_lock_);
    // Capture printed text only for fatal reports.
    error_message_ptr_ = fatal ? &error_message_ : nullptr;
    ++hwasan_report_count;
  }

  ~ScopedReport() {
    {
      BlockingMutexLock lock(&error_message_lock_);
      if (fatal)
        SetAbortMessage(error_message_.data());
      // Stop capturing before we potentially dump the module map / die.
      error_message_ptr_ = nullptr;
    }
    // print_module_map == 1: dump only for fatal errors; >= 2: dump always.
    if (common_flags()->print_module_map >= 2 ||
        (fatal && common_flags()->print_module_map))
      DumpProcessMap();
    if (fatal)
      Die();
  }

  // Appends msg to the active report's buffer, if a fatal ScopedReport is
  // currently alive; otherwise a no-op.  Thread-safe.
  static void MaybeAppendToErrorMessage(const char *msg) {
    BlockingMutexLock lock(&error_message_lock_);
    if (!error_message_ptr_)
      return;
    uptr len = internal_strlen(msg);
    uptr old_size = error_message_ptr_->size();
    error_message_ptr_->resize(old_size + len);
    // overwrite old trailing '\0', keep new trailing '\0' untouched.
    internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len);
  }
 private:
  // Serializes whole reports across threads for the lifetime of this object.
  ScopedErrorReportLock error_report_lock_;
  InternalMmapVector<char> error_message_;
  bool fatal;

  // Capture target for MaybeAppendToErrorMessage(); non-null only while a
  // fatal ScopedReport is alive.  Guarded by error_message_lock_.
  static InternalMmapVector<char> *error_message_ptr_;
  static BlockingMutex error_message_lock_;
};
77 
// Out-of-line definitions for ScopedReport's static capture state.
InternalMmapVector<char> *ScopedReport::error_message_ptr_;
BlockingMutex ScopedReport::error_message_lock_;
80 
// If there is an active ScopedReport, append to its error message.  Invoked
// by the sanitizer printing machinery for every chunk of report output.
void AppendToErrorMessageBuffer(const char *buffer) {
  ScopedReport::MaybeAppendToErrorMessage(buffer);
}
85 
86 static StackTrace GetStackTraceFromId(u32 id) {
87   CHECK(id);
88   StackTrace res = StackDepotGet(id);
89   CHECK(res.trace);
90   return res;
91 }
92 
// A RAII object that holds a copy of the current thread stack ring buffer.
// The actual stack buffer may change while we are iterating over it (for
// example, Printf may call syslog() which can itself be built with hwasan).
class SavedStackAllocations {
 public:
  // Copies rb into a freshly mmap-ed buffer.  The mapping is aligned to
  // twice its size — NOTE(review): this alignment appears to be required by
  // StackAllocationsRingBuffer's storage scheme; confirm against its
  // definition.
  SavedStackAllocations(StackAllocationsRingBuffer *rb) {
    uptr size = rb->size() * sizeof(uptr);
    void *storage =
        MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
    // The ring buffer object is placement-constructed into rb_, so the
    // object itself must fit in a single uptr (presumably it just wraps a
    // pointer into the mapped storage — TODO confirm).
    new (&rb_) StackAllocationsRingBuffer(*rb, storage);
  }

  ~SavedStackAllocations() {
    // Release the mapping created in the constructor.
    StackAllocationsRingBuffer *rb = get();
    UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
  }

  // Returns the saved copy of the ring buffer.
  StackAllocationsRingBuffer *get() {
    return (StackAllocationsRingBuffer *)&rb_;
  }

 private:
  // Raw storage for the placement-new'ed StackAllocationsRingBuffer.
  uptr rb_;
};
117 
118 class Decorator: public __sanitizer::SanitizerCommonDecorator {
119  public:
120   Decorator() : SanitizerCommonDecorator() { }
121   const char *Access() { return Blue(); }
122   const char *Allocation() const { return Magenta(); }
123   const char *Origin() const { return Magenta(); }
124   const char *Name() const { return Green(); }
125   const char *Location() { return Green(); }
126   const char *Thread() { return Green(); }
127 };
128 
129 static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
130                                HeapAllocationRecord *har, uptr *ring_index,
131                                uptr *num_matching_addrs,
132                                uptr *num_matching_addrs_4b) {
133   if (!rb) return false;
134 
135   *num_matching_addrs = 0;
136   *num_matching_addrs_4b = 0;
137   for (uptr i = 0, size = rb->size(); i < size; i++) {
138     auto h = (*rb)[i];
139     if (h.tagged_addr <= tagged_addr &&
140         h.tagged_addr + h.requested_size > tagged_addr) {
141       *har = h;
142       *ring_index = i;
143       return true;
144     }
145 
146     // Measure the number of heap ring buffer entries that would have matched
147     // if we had only one entry per address (e.g. if the ring buffer data was
148     // stored at the address itself). This will help us tune the allocator
149     // implementation for MTE.
150     if (UntagAddr(h.tagged_addr) <= UntagAddr(tagged_addr) &&
151         UntagAddr(h.tagged_addr) + h.requested_size > UntagAddr(tagged_addr)) {
152       ++*num_matching_addrs;
153     }
154 
155     // Measure the number of heap ring buffer entries that would have matched
156     // if we only had 4 tag bits, which is the case for MTE.
157     auto untag_4b = [](uptr p) {
158       return p & ((1ULL << 60) - 1);
159     };
160     if (untag_4b(h.tagged_addr) <= untag_4b(tagged_addr) &&
161         untag_4b(h.tagged_addr) + h.requested_size > untag_4b(tagged_addr)) {
162       ++*num_matching_addrs_4b;
163     }
164   }
165   return false;
166 }
167 
// Describes stack objects that may correspond to the faulting access: walks
// up to stack_history_size records of the (saved) stack allocations ring
// buffer, symbolizes each recorded frame, and prints every local variable
// whose tag matches addr_tag and whose extent covers untagged_addr.  When no
// local matches (e.g. no symbols available), dumps the raw records for
// offline analysis instead.
static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
                                  tag_t addr_tag, uptr untagged_addr) {
  uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
  bool found_local = false;
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    // A zero record marks the end of the used portion of the buffer.
    if (!record)
      break;
    // The frame's base tag is derived from the address of the record slot
    // itself; the record packs FP (upper bits) and PC (lower bits).
    tag_t base_tag =
        reinterpret_cast<uptr>(record_addr) >> kRecordAddrBaseTagShift;
    uptr fp = (record >> kRecordFPShift) << kRecordFPLShift;
    uptr pc_mask = (1ULL << kRecordFPShift) - 1;
    uptr pc = record & pc_mask;
    FrameInfo frame;
    if (Symbolizer::GetOrInit()->SymbolizeFrame(pc, &frame)) {
      for (LocalInfo &local : frame.locals) {
        if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset)
          continue;
        // A local's tag is the frame base tag xor'ed with its per-object
        // tag offset.
        tag_t obj_tag = base_tag ^ local.tag_offset;
        if (obj_tag != addr_tag)
          continue;
        // Calculate the offset from the object address to the faulting
        // address. Because we only store bits 4-19 of FP (bits 0-3 are
        // guaranteed to be zero), the calculation is performed mod 2^20 and may
        // harmlessly underflow if the address mod 2^20 is below the object
        // address.
        uptr obj_offset =
            (untagged_addr - fp - local.frame_offset) & (kRecordFPModulus - 1);
        if (obj_offset >= local.size)
          continue;
        if (!found_local) {
          Printf("Potentially referenced stack objects:\n");
          found_local = true;
        }
        Printf("  %s in %s %s:%d\n", local.name, local.function_name,
               local.decl_file, local.decl_line);
      }
      frame.Clear();
    }
  }

  if (found_local)
    return;

  // We didn't find any locals. Most likely we don't have symbols, so dump
  // the information that we have for offline analysis.
  InternalScopedString frame_desc(GetPageSizeCached() * 2);
  Printf("Previously allocated frames:\n");
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    // NOTE(review): this loop uses a hard-coded 48-bit PC mask while the
    // loop above masks with kRecordFPShift bits — confirm the two agree.
    uptr pc_mask = (1ULL << 48) - 1;
    uptr pc = record & pc_mask;
    frame_desc.append("  record_addr:0x%zx record:0x%zx",
                      reinterpret_cast<uptr>(record_addr), record);
    if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) {
      RenderFrame(&frame_desc, " %F %L\n", 0, frame->info,
                  common_flags()->symbolize_vs_style,
                  common_flags()->strip_path_prefix);
      frame->ClearAll();
    }
    Printf("%s", frame_desc.data());
    frame_desc.clear();
  }
}
236 
237 // Returns true if tag == *tag_ptr, reading tags from short granules if
238 // necessary. This may return a false positive if tags 1-15 are used as a
239 // regular tag rather than a short granule marker.
240 static bool TagsEqual(tag_t tag, tag_t *tag_ptr) {
241   if (tag == *tag_ptr)
242     return true;
243   if (*tag_ptr == 0 || *tag_ptr > kShadowAlignment - 1)
244     return false;
245   uptr mem = ShadowToMem(reinterpret_cast<uptr>(tag_ptr));
246   tag_t inline_tag = *reinterpret_cast<tag_t *>(mem + kShadowAlignment - 1);
247   return tag == inline_tag;
248 }
249 
250 // HWASan globals store the size of the global in the descriptor. In cases where
251 // we don't have a binary with symbols, we can't grab the size of the global
252 // from the debug info - but we might be able to retrieve it from the
253 // descriptor. Returns zero if the lookup failed.
254 static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
255   // Find the ELF object that this global resides in.
256   Dl_info info;
257   dladdr(reinterpret_cast<void *>(ptr), &info);
258   auto *ehdr = reinterpret_cast<const ElfW(Ehdr) *>(info.dli_fbase);
259   auto *phdr_begin = reinterpret_cast<const ElfW(Phdr) *>(
260       reinterpret_cast<const u8 *>(ehdr) + ehdr->e_phoff);
261 
262   // Get the load bias. This is normally the same as the dli_fbase address on
263   // position-independent code, but can be different on non-PIE executables,
264   // binaries using LLD's partitioning feature, or binaries compiled with a
265   // linker script.
266   ElfW(Addr) load_bias = 0;
267   for (const auto &phdr :
268        ArrayRef<const ElfW(Phdr)>(phdr_begin, phdr_begin + ehdr->e_phnum)) {
269     if (phdr.p_type != PT_LOAD || phdr.p_offset != 0)
270       continue;
271     load_bias = reinterpret_cast<ElfW(Addr)>(ehdr) - phdr.p_vaddr;
272     break;
273   }
274 
275   // Walk all globals in this ELF object, looking for the one we're interested
276   // in. Once we find it, we can stop iterating and return the size of the
277   // global we're interested in.
278   for (const hwasan_global &global :
279        HwasanGlobalsFor(load_bias, phdr_begin, ehdr->e_phnum))
280     if (global.addr() <= ptr && ptr < global.addr() + global.size())
281       return global.size();
282 
283   return 0;
284 }
285 
286 void PrintAddressDescription(
287     uptr tagged_addr, uptr access_size,
288     StackAllocationsRingBuffer *current_stack_allocations) {
289   Decorator d;
290   int num_descriptions_printed = 0;
291   uptr untagged_addr = UntagAddr(tagged_addr);
292 
293   // Print some very basic information about the address, if it's a heap.
294   HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
295   if (uptr beg = chunk.Beg()) {
296     uptr size = chunk.ActualSize();
297     Printf("%s[%p,%p) is a %s %s heap chunk; "
298            "size: %zd offset: %zd\n%s",
299            d.Location(),
300            beg, beg + size,
301            chunk.FromSmallHeap() ? "small" : "large",
302            chunk.IsAllocated() ? "allocated" : "unallocated",
303            size, untagged_addr - beg,
304            d.Default());
305   }
306 
307   // Check if this looks like a heap buffer overflow by scanning
308   // the shadow left and right and looking for the first adjacent
309   // object with a different memory tag. If that tag matches addr_tag,
310   // check the allocator if it has a live chunk there.
311   tag_t addr_tag = GetTagFromPointer(tagged_addr);
312   tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
313   tag_t *candidate = nullptr, *left = tag_ptr, *right = tag_ptr;
314   for (int i = 0; i < 1000; i++) {
315     if (TagsEqual(addr_tag, left)) {
316       candidate = left;
317       break;
318     }
319     --left;
320     if (TagsEqual(addr_tag, right)) {
321       candidate = right;
322       break;
323     }
324     ++right;
325   }
326 
327   if (candidate) {
328     uptr mem = ShadowToMem(reinterpret_cast<uptr>(candidate));
329     HwasanChunkView chunk = FindHeapChunkByAddress(mem);
330     if (chunk.IsAllocated()) {
331       Printf("%s", d.Location());
332       Printf("%p is located %zd bytes to the %s of %zd-byte region [%p,%p)\n",
333              untagged_addr,
334              candidate == left ? untagged_addr - chunk.End()
335                                : chunk.Beg() - untagged_addr,
336              candidate == left ? "right" : "left", chunk.UsedSize(),
337              chunk.Beg(), chunk.End());
338       Printf("%s", d.Allocation());
339       Printf("allocated here:\n");
340       Printf("%s", d.Default());
341       GetStackTraceFromId(chunk.GetAllocStackId()).Print();
342       num_descriptions_printed++;
343     } else {
344       // Check whether the address points into a loaded library. If so, this is
345       // most likely a global variable.
346       const char *module_name;
347       uptr module_address;
348       Symbolizer *sym = Symbolizer::GetOrInit();
349       if (sym->GetModuleNameAndOffsetForPC(mem, &module_name,
350                                            &module_address)) {
351         DataInfo info;
352         if (sym->SymbolizeData(mem, &info) && info.start) {
353           Printf(
354               "%p is located %zd bytes to the %s of %zd-byte global variable "
355               "%s [%p,%p) in %s\n",
356               untagged_addr,
357               candidate == left ? untagged_addr - (info.start + info.size)
358                                 : info.start - untagged_addr,
359               candidate == left ? "right" : "left", info.size, info.name,
360               info.start, info.start + info.size, module_name);
361         } else {
362           uptr size = GetGlobalSizeFromDescriptor(mem);
363           if (size == 0)
364             // We couldn't find the size of the global from the descriptors.
365             Printf(
366                 "%p is located to the %s of a global variable in (%s+0x%x)\n",
367                 untagged_addr, candidate == left ? "right" : "left",
368                 module_name, module_address);
369           else
370             Printf(
371                 "%p is located to the %s of a %zd-byte global variable in "
372                 "(%s+0x%x)\n",
373                 untagged_addr, candidate == left ? "right" : "left", size,
374                 module_name, module_address);
375         }
376         num_descriptions_printed++;
377       }
378     }
379   }
380 
381   hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
382     // Scan all threads' ring buffers to find if it's a heap-use-after-free.
383     HeapAllocationRecord har;
384     uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
385     if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
386                            &ring_index, &num_matching_addrs,
387                            &num_matching_addrs_4b)) {
388       Printf("%s", d.Location());
389       Printf("%p is located %zd bytes inside of %zd-byte region [%p,%p)\n",
390              untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
391              har.requested_size, UntagAddr(har.tagged_addr),
392              UntagAddr(har.tagged_addr) + har.requested_size);
393       Printf("%s", d.Allocation());
394       Printf("freed by thread T%zd here:\n", t->unique_id());
395       Printf("%s", d.Default());
396       GetStackTraceFromId(har.free_context_id).Print();
397 
398       Printf("%s", d.Allocation());
399       Printf("previously allocated here:\n", t);
400       Printf("%s", d.Default());
401       GetStackTraceFromId(har.alloc_context_id).Print();
402 
403       // Print a developer note: the index of this heap object
404       // in the thread's deallocation ring buffer.
405       Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ring_index + 1,
406              flags()->heap_history_size);
407       Printf("hwasan_dev_note_num_matching_addrs: %zd\n", num_matching_addrs);
408       Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
409              num_matching_addrs_4b);
410 
411       t->Announce();
412       num_descriptions_printed++;
413     }
414 
415     // Very basic check for stack memory.
416     if (t->AddrIsInStack(untagged_addr)) {
417       Printf("%s", d.Location());
418       Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
419              t->unique_id());
420       Printf("%s", d.Default());
421       t->Announce();
422 
423       auto *sa = (t == GetCurrentThread() && current_stack_allocations)
424                      ? current_stack_allocations
425                      : t->stack_allocations();
426       PrintStackAllocations(sa, addr_tag, untagged_addr);
427       num_descriptions_printed++;
428     }
429   });
430 
431   // Print the remaining threads, as an extra information, 1 line per thread.
432   hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
433 
434   if (!num_descriptions_printed)
435     // We exhausted our possibilities. Bail out.
436     Printf("HWAddressSanitizer can not describe address in more detail.\n");
437 }
438 
// Statistics hook required by the sanitizer runtime interface; hwasan keeps
// no statistics, so this is deliberately a no-op.
void ReportStats() {}
440 
441 static void PrintTagInfoAroundAddr(tag_t *tag_ptr, uptr num_rows,
442                                    void (*print_tag)(InternalScopedString &s,
443                                                      tag_t *tag)) {
444   const uptr row_len = 16;  // better be power of two.
445   tag_t *center_row_beg = reinterpret_cast<tag_t *>(
446       RoundDownTo(reinterpret_cast<uptr>(tag_ptr), row_len));
447   tag_t *beg_row = center_row_beg - row_len * (num_rows / 2);
448   tag_t *end_row = center_row_beg + row_len * ((num_rows + 1) / 2);
449   InternalScopedString s(GetPageSizeCached() * 8);
450   for (tag_t *row = beg_row; row < end_row; row += row_len) {
451     s.append("%s", row == center_row_beg ? "=>" : "  ");
452     s.append("%p:", row);
453     for (uptr i = 0; i < row_len; i++) {
454       s.append("%s", row + i == tag_ptr ? "[" : " ");
455       print_tag(s, &row[i]);
456       s.append("%s", row + i == tag_ptr ? "]" : " ");
457     }
458     s.append("\n");
459   }
460   Printf("%s", s.data());
461 }
462 
// Prints two shadow dumps centered on tag_ptr: the memory tags themselves,
// and the tags stored inside short granules, followed by a pointer to the
// design document that explains short granules.
static void PrintTagsAroundAddr(tag_t *tag_ptr) {
  Printf(
      "Memory tags around the buggy address (one tag corresponds to %zd "
      "bytes):\n", kShadowAlignment);
  PrintTagInfoAroundAddr(tag_ptr, 17, [](InternalScopedString &s, tag_t *tag) {
    s.append("%02x", *tag);
  });

  Printf(
      "Tags for short granules around the buggy address (one tag corresponds "
      "to %zd bytes):\n",
      kShadowAlignment);
  PrintTagInfoAroundAddr(tag_ptr, 3, [](InternalScopedString &s, tag_t *tag) {
    // Shadow values 1..kShadowAlignment denote a short granule whose real
    // tag lives in the granule's last byte; anything else has no short tag.
    if (*tag >= 1 && *tag <= kShadowAlignment) {
      uptr granule_addr = ShadowToMem(reinterpret_cast<uptr>(tag));
      s.append("%02x",
               *reinterpret_cast<u8 *>(granule_addr + kShadowAlignment - 1));
    } else {
      s.append("..");
    }
  });
  Printf(
      "See "
      "https://clang.llvm.org/docs/"
      "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
      "description of short granule tags\n");
}
490 
// Reports an invalid deallocation: the pointer's tag does not match the tag
// of the memory being freed.  stack is the deallocation stack.
void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
  // Fatal only when halt_on_error is set.
  ScopedReport R(flags()->halt_on_error);

  uptr untagged_addr = UntagAddr(tagged_addr);
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
  tag_t mem_tag = *tag_ptr;
  Decorator d;
  Printf("%s", d.Error());
  uptr pc = stack->size ? stack->trace[0] : 0;
  const char *bug_type = "invalid-free";
  Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
         untagged_addr, pc);
  Printf("%s", d.Access());
  Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag, mem_tag);
  Printf("%s", d.Default());

  stack->Print();

  // access_size 0: this is a deallocation, not a memory access.
  PrintAddressDescription(tagged_addr, 0, nullptr);

  PrintTagsAroundAddr(tag_ptr);

  ReportErrorSummary(bug_type, stack);
}
516 
// Reports that the tail magic of a heap object — the bytes between the end
// of the requested allocation and the end of its last granule — was
// overwritten by the time the object was freed.  `expected` points to the
// magic pattern the tail was filled with at allocation time; stack is the
// deallocation stack.
void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
                           const u8 *expected) {
  // Tail length: the unused remainder of the last granule.
  uptr tail_size = kShadowAlignment - (orig_size % kShadowAlignment);
  ScopedReport R(flags()->halt_on_error);
  Decorator d;
  uptr untagged_addr = UntagAddr(tagged_addr);
  Printf("%s", d.Error());
  const char *bug_type = "allocation-tail-overwritten";
  Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
         bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
  Printf("\n%s", d.Default());
  stack->Print();
  HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
  if (chunk.Beg()) {
    Printf("%s", d.Allocation());
    Printf("allocated here:\n");
    Printf("%s", d.Default());
    GetStackTraceFromId(chunk.GetAllocStackId()).Print();
  }

  // Build three column-aligned rows: the actual tail bytes, the expected
  // magic bytes, and "^^" markers under every mismatching byte.
  InternalScopedString s(GetPageSizeCached() * 8);
  CHECK_GT(tail_size, 0U);
  CHECK_LT(tail_size, kShadowAlignment);
  u8 *tail = reinterpret_cast<u8*>(untagged_addr + orig_size);
  s.append("Tail contains: ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append(".. ");
  for (uptr i = 0; i < tail_size; i++)
    s.append("%02x ", tail[i]);
  s.append("\n");
  s.append("Expected:      ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append(".. ");
  for (uptr i = 0; i < tail_size; i++)
    s.append("%02x ", expected[i]);
  s.append("\n");
  s.append("               ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append("   ");
  for (uptr i = 0; i < tail_size; i++)
    s.append("%s ", expected[i] != tail[i] ? "^^" : "  ");

  s.append("\nThis error occurs when a buffer overflow overwrites memory\n"
    "to the right of a heap object, but within the %zd-byte granule, e.g.\n"
    "   char *x = new char[20];\n"
    "   x[25] = 42;\n"
    "%s does not detect such bugs in uninstrumented code at the time of write,"
    "\nbut can detect them at the time of free/delete.\n"
    "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
    kShadowAlignment, SanitizerToolName);
  Printf("%s", s.data());
  GetCurrentThread()->Announce();

  tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
  PrintTagsAroundAddr(tag_ptr);

  ReportErrorSummary(bug_type, stack);
}
575 
// Reports the generic hwasan check failure: the pointer's tag did not match
// the memory tag of the accessed granule.  Prints the access parameters, the
// faulting stack, an address description, surrounding shadow tags and — when
// invoked from the assembly check — the spilled register state.
void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
                       bool is_store, bool fatal, uptr *registers_frame) {
  ScopedReport R(fatal);
  // Snapshot the stack ring buffer early: printing below may itself append
  // records to the live buffer (see SavedStackAllocations).
  SavedStackAllocations current_stack_allocations(
      GetCurrentThread()->stack_allocations());

  Decorator d;
  Printf("%s", d.Error());
  uptr untagged_addr = UntagAddr(tagged_addr);
  // TODO: when possible, try to print heap-use-after-free, etc.
  const char *bug_type = "tag-mismatch";
  uptr pc = stack->size ? stack->trace[0] : 0;
  Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
         untagged_addr, pc);

  Thread *t = GetCurrentThread();

  // For multi-byte accesses, locate the first granule that actually faults;
  // the mismatching memory tag is read from there.
  sptr offset =
      __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
  CHECK(offset >= 0 && offset < static_cast<sptr>(access_size));
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  tag_t *tag_ptr =
      reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
  tag_t mem_tag = *tag_ptr;

  Printf("%s", d.Access());
  Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
         is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
         mem_tag, t->unique_id());
  if (offset != 0)
    Printf("Invalid access starting at offset [%zu, %zu)\n", offset,
           Min(access_size, static_cast<uptr>(offset) + (1 << kShadowScale)));
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription(tagged_addr, access_size,
                          current_stack_allocations.get());
  t->Announce();

  PrintTagsAroundAddr(tag_ptr);

  if (registers_frame)
    ReportRegisters(registers_frame, pc);

  ReportErrorSummary(bug_type, stack);
}
623 
// See the frame breakdown defined in __hwasan_tag_mismatch (from
// hwasan_tag_mismatch_aarch64.S).
// Dumps the AArch64 general-purpose registers x0-x30 that the assembly
// tag-mismatch handler spilled into `frame`; `pc` is the faulting program
// counter.
void ReportRegisters(uptr *frame, uptr pc) {
  Printf("Registers where the failure occurred (pc %p):\n", pc);

  // We explicitly print a single line (4 registers/line) each iteration to
  // reduce the amount of logcat error messages printed. Each Printf() will
  // result in a new logcat line, irrespective of whether a newline is present,
  // and so we wish to reduce the number of Printf() calls we have to make.
  Printf("    x0  %016llx  x1  %016llx  x2  %016llx  x3  %016llx\n",
       frame[0], frame[1], frame[2], frame[3]);
  Printf("    x4  %016llx  x5  %016llx  x6  %016llx  x7  %016llx\n",
       frame[4], frame[5], frame[6], frame[7]);
  Printf("    x8  %016llx  x9  %016llx  x10 %016llx  x11 %016llx\n",
       frame[8], frame[9], frame[10], frame[11]);
  Printf("    x12 %016llx  x13 %016llx  x14 %016llx  x15 %016llx\n",
       frame[12], frame[13], frame[14], frame[15]);
  Printf("    x16 %016llx  x17 %016llx  x18 %016llx  x19 %016llx\n",
       frame[16], frame[17], frame[18], frame[19]);
  Printf("    x20 %016llx  x21 %016llx  x22 %016llx  x23 %016llx\n",
       frame[20], frame[21], frame[22], frame[23]);
  Printf("    x24 %016llx  x25 %016llx  x26 %016llx  x27 %016llx\n",
       frame[24], frame[25], frame[26], frame[27]);
  Printf("    x28 %016llx  x29 %016llx  x30 %016llx\n",
       frame[28], frame[29], frame[30]);
}
650 
651 }  // namespace __hwasan
652