//===-- hwasan_report.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Error reporting.
//===----------------------------------------------------------------------===//

#include "hwasan_report.h"

#include <dlfcn.h>

#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_globals.h"
#include "hwasan_mapping.h"
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_array_ref.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace_printer.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

using namespace __sanitizer;

namespace __hwasan {

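// Accumulates the text of one error report while it is alive and delivers it
// on destruction: the text is forwarded to the user callback (if one is
// installed), set as the abort message on fatal errors, and followed by Die()
// when the error is fatal. Output produced via AppendToErrorMessageBuffer()
// while a ScopedReport is active is captured into its buffer.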
class ScopedReport {
 public:
  explicit ScopedReport(bool fatal) : fatal(fatal) {
    Lock lock(&error_message_lock_);
    error_message_ptr_ = &error_message_;
    ++hwasan_report_count;
  }

  ~ScopedReport() {
    void (*report_cb)(const char *);
    {
      Lock lock(&error_message_lock_);
      report_cb = error_report_callback_;
      error_message_ptr_ = nullptr;
    }
    if (report_cb)
      report_cb(error_message_.data());
    if (fatal)
      SetAbortMessage(error_message_.data());
    if (common_flags()->print_module_map >= 2 ||
        (fatal && common_flags()->print_module_map))
      DumpProcessMap();
    if (fatal)
      Die();
  }

  static void MaybeAppendToErrorMessage(const char *msg) {
    Lock lock(&error_message_lock_);
    if (!error_message_ptr_)
      return;
    error_message_ptr_->Append(msg);
  }

  static void SetErrorReportCallback(void (*callback)(const char *)) {
    Lock lock(&error_message_lock_);
    error_report_callback_ = callback;
  }

 private:
  InternalScopedString error_message_;
  bool fatal;

  static Mutex error_message_lock_;
  static InternalScopedString *error_message_ptr_
      SANITIZER_GUARDED_BY(error_message_lock_);
  static void (*error_report_callback_)(const char *);
};

Mutex ScopedReport::error_message_lock_;
InternalScopedString *ScopedReport::error_message_ptr_;
void (*ScopedReport::error_report_callback_)(const char *);

// If there is an active ScopedReport, append to its error message.
void AppendToErrorMessageBuffer(const char *buffer) {
  ScopedReport::MaybeAppendToErrorMessage(buffer);
}

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

static void MaybePrintAndroidHelpUrl() {
#if SANITIZER_ANDROID
  Printf(
      "Learn more about HWASan reports: "
      "https://source.android.com/docs/security/test/memory-safety/"
      "hwasan-reports\n");
#endif
}

namespace {
// A RAII object that holds a copy of the current thread stack ring buffer.
// The actual stack buffer may change while we are iterating over it (for
// example, Printf may call syslog() which can itself be built with hwasan).
class SavedStackAllocations {
 public:
  SavedStackAllocations() = default;

  explicit SavedStackAllocations(Thread *t) { CopyFrom(t); }

  void CopyFrom(Thread *t) {
    StackAllocationsRingBuffer *rb = t->stack_allocations();
    uptr size = rb->size() * sizeof(uptr);
    void *storage =
        MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
    new (&rb_) StackAllocationsRingBuffer(*rb, storage);
    thread_id_ = t->unique_id();
  }

  ~SavedStackAllocations() {
    if (rb_) {
      StackAllocationsRingBuffer *rb = get();
      UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
    }
  }

  const StackAllocationsRingBuffer *get() const {
    return (const StackAllocationsRingBuffer *)&rb_;
  }

  StackAllocationsRingBuffer *get() {
    return (StackAllocationsRingBuffer *)&rb_;
  }

  u32 thread_id() const { return thread_id_; }

 private:
  uptr rb_ = 0;
  u32 thread_id_;
};

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Access() { return Blue(); }
  const char *Allocation() const { return Magenta(); }
  const char *Origin() const { return Magenta(); }
  const char *Name() const { return Green(); }
  const char *Location() { return Green(); }
  const char *Thread() { return Green(); }
};
} // namespace

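// Searches the thread's heap deallocation ring buffer for a record covering
// tagged_addr (including the pointer tag). Also counts how many records would
// have matched under weaker schemes (untagged addresses, and 4-bit tags as on
// MTE); these counters only feed the developer notes printed with the report.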
static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
                               HeapAllocationRecord *har, uptr *ring_index,
                               uptr *num_matching_addrs,
                               uptr *num_matching_addrs_4b) {
  if (!rb) return false;

  *num_matching_addrs = 0;
  *num_matching_addrs_4b = 0;
  for (uptr i = 0, size = rb->size(); i < size; i++) {
    auto h = (*rb)[i];
    if (h.tagged_addr <= tagged_addr &&
        h.tagged_addr + h.requested_size > tagged_addr) {
      *har = h;
      *ring_index = i;
      return true;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we had only one entry per address (e.g. if the ring buffer data was
    // stored at the address itself). This will help us tune the allocator
    // implementation for MTE.
    if (UntagAddr(h.tagged_addr) <= UntagAddr(tagged_addr) &&
        UntagAddr(h.tagged_addr) + h.requested_size > UntagAddr(tagged_addr)) {
      ++*num_matching_addrs;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we only had 4 tag bits, which is the case for MTE.
    auto untag_4b = [](uptr p) {
      return p & ((1ULL << 60) - 1);
    };
    if (untag_4b(h.tagged_addr) <= untag_4b(tagged_addr) &&
        untag_4b(h.tagged_addr) + h.requested_size > untag_4b(tagged_addr)) {
      ++*num_matching_addrs_4b;
    }
  }
  return false;
}

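// Describes stack objects that may be the target of the bad access: walks the
// saved stack ring buffer, symbolizes each frame record, and prints every
// local variable whose recomputed tag matches addr_tag. If symbolization
// yields no matching local, falls back to dumping the raw frame records for
// offline analysis.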
static void PrintStackAllocations(const StackAllocationsRingBuffer *sa,
                                  tag_t addr_tag, uptr untagged_addr) {
  uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
  bool found_local = false;
  InternalScopedString location;
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    tag_t base_tag =
        reinterpret_cast<uptr>(record_addr) >> kRecordAddrBaseTagShift;
    const uptr fp = (record >> kRecordFPShift) << kRecordFPLShift;
    CHECK_LT(fp, kRecordFPModulus);
    uptr pc_mask = (1ULL << kRecordFPShift) - 1;
    uptr pc = record & pc_mask;
    FrameInfo frame;
    if (!Symbolizer::GetOrInit()->SymbolizeFrame(pc, &frame))
      continue;
    for (LocalInfo &local : frame.locals) {
      if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset)
        continue;
      if (!(local.name && internal_strlen(local.name)) &&
          !(local.function_name && internal_strlen(local.function_name)) &&
          !(local.decl_file && internal_strlen(local.decl_file)))
        continue;
      tag_t obj_tag = base_tag ^ local.tag_offset;
      if (obj_tag != addr_tag)
        continue;

      // We only store bits 4-19 of FP (bits 0-3 are guaranteed to be zero).
      // So we know only `FP % kRecordFPModulus`, and we can only calculate
      // `local_beg % kRecordFPModulus`.
      // Out of all possible `local_beg` we will only consider 2 candidates
      // nearest to the `untagged_addr`.
      uptr local_beg_mod = (fp + local.frame_offset) % kRecordFPModulus;
      // Pick `local_beg` in the same 1 MiB block as `untagged_addr`.
      uptr local_beg =
          RoundDownTo(untagged_addr, kRecordFPModulus) + local_beg_mod;
      // Pick the largest `local_beg <= untagged_addr`. It's either the current
      // one or the one before.
      if (local_beg > untagged_addr)
        local_beg -= kRecordFPModulus;

      uptr offset = -1ull;
      const char *whence;
      const char *cause = nullptr;
      uptr best_beg;

      // Try the two candidate 1 MiB blocks and pick the nearest one.
      for (uptr i = 0; i < 2; ++i, local_beg += kRecordFPModulus) {
        uptr local_end = local_beg + local.size;
        if (local_beg > local_end)
          continue;  // This is a wraparound.
        if (local_beg <= untagged_addr && untagged_addr < local_end) {
          offset = untagged_addr - local_beg;
          whence = "inside";
          cause = "use-after-scope";
          best_beg = local_beg;
          break;  // This is as close as it can be.
        }

        if (untagged_addr >= local_end) {
          uptr new_offset = untagged_addr - local_end;
          if (new_offset < offset) {
            offset = new_offset;
            whence = "after";
            cause = "stack-buffer-overflow";
            best_beg = local_beg;
          }
        } else {
          uptr new_offset = local_beg - untagged_addr;
          if (new_offset < offset) {
            offset = new_offset;
            whence = "before";
            cause = "stack-buffer-overflow";
            best_beg = local_beg;
          }
        }
      }

      // For `cause` to remain unset, `untagged_addr` would have to be near
      // nullptr, which is impossible with the Linux user-space memory layout.
      if (!cause)
        continue;


      if (!found_local) {
        Printf("\nPotentially referenced stack objects:\n");
        found_local = true;
      }

      Decorator d;
      Printf("%s", d.Error());
      Printf("Cause: %s\n", cause);
      Printf("%s", d.Default());
      Printf("%s", d.Location());
      StackTracePrinter::GetOrInit()->RenderSourceLocation(
          &location, local.decl_file, local.decl_line, /* column= */ 0,
          common_flags()->symbolize_vs_style,
          common_flags()->strip_path_prefix);
      Printf(
          "%p is located %zd bytes %s a %zd-byte local variable %s "
          "[%p,%p) "
          "in %s %s\n",
          (void *)untagged_addr, offset, whence, local.size, local.name,
          (void *)best_beg, (void *)(best_beg + local.size),
          local.function_name, location.data());
      location.clear();
      Printf("%s\n", d.Default());
    }
    frame.Clear();
  }

  if (found_local)
    return;

  // We didn't find any locals. Most likely we don't have symbols, so dump
  // the information that we have for offline analysis.
  InternalScopedString frame_desc;
  Printf("Previously allocated frames:\n");
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    uptr pc_mask = (1ULL << 48) - 1;
    uptr pc = record & pc_mask;
    frame_desc.AppendF("  record_addr:%p record:0x%zx",
                       reinterpret_cast<const void *>(record_addr), record);
    SymbolizedStackHolder symbolized_stack(
        Symbolizer::GetOrInit()->SymbolizePC(pc));
    const SymbolizedStack *frame = symbolized_stack.get();
    if (frame) {
      StackTracePrinter::GetOrInit()->RenderFrame(
          &frame_desc, " %F %L", 0, frame->info.address, &frame->info,
          common_flags()->symbolize_vs_style,
          common_flags()->strip_path_prefix);
    }
    Printf("%s\n", frame_desc.data());
    frame_desc.clear();
  }
}

// Returns true if tag == *tag_ptr, reading tags from short granules if
// necessary. This may return a false positive if tags 1-15 are used as a
// regular tag rather than a short granule marker.
static bool TagsEqual(tag_t tag, tag_t *tag_ptr) {
  if (tag == *tag_ptr)
    return true;
  if (*tag_ptr == 0 || *tag_ptr > kShadowAlignment - 1)
    return false;
  uptr mem = ShadowToMem(reinterpret_cast<uptr>(tag_ptr));
  tag_t inline_tag = *reinterpret_cast<tag_t *>(mem + kShadowAlignment - 1);
  return tag == inline_tag;
}

// HWASan globals store the size of the global in the descriptor. In cases
// where we don't have a binary with symbols, we can't grab the size of the
// global from the debug info - but we might be able to retrieve it from the
// descriptor. Returns zero if the lookup failed.
static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
  // Find the ELF object that this global resides in.
  Dl_info info;
  if (dladdr(reinterpret_cast<void *>(ptr), &info) == 0)
    return 0;
  auto *ehdr = reinterpret_cast<const ElfW(Ehdr) *>(info.dli_fbase);
  auto *phdr_begin = reinterpret_cast<const ElfW(Phdr) *>(
      reinterpret_cast<const u8 *>(ehdr) + ehdr->e_phoff);

  // Get the load bias. This is normally the same as the dli_fbase address on
  // position-independent code, but can be different on non-PIE executables,
  // binaries using LLD's partitioning feature, or binaries compiled with a
  // linker script.
  ElfW(Addr) load_bias = 0;
  for (const auto &phdr :
       ArrayRef<const ElfW(Phdr)>(phdr_begin, phdr_begin + ehdr->e_phnum)) {
    if (phdr.p_type != PT_LOAD || phdr.p_offset != 0)
      continue;
    load_bias = reinterpret_cast<ElfW(Addr)>(ehdr) - phdr.p_vaddr;
    break;
  }

  // Walk all globals in this ELF object, looking for the one we're interested
  // in. Once we find it, we can stop iterating and return the size of the
  // global we're interested in.
  for (const hwasan_global &global :
       HwasanGlobalsFor(load_bias, phdr_begin, ehdr->e_phnum))
    if (global.addr() <= ptr && ptr < global.addr() + global.size())
      return global.size();

  return 0;
}

void ReportStats() {}

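// Geometry of the shadow dump printed with each report: kShadowLines rows of
// kDumpWidth shadow bytes, centered on the faulting granule; the short-granule
// dump covers only the kShortLines rows in the middle of that window.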
constexpr uptr kDumpWidth = 16;
constexpr uptr kShadowLines = 17;
constexpr uptr kShadowDumpSize = kShadowLines * kDumpWidth;

constexpr uptr kShortLines = 3;
constexpr uptr kShortDumpSize = kShortLines * kDumpWidth;
constexpr uptr kShortDumpOffset = (kShadowLines - kShortLines) / 2 * kDumpWidth;

static uptr GetPrintTagStart(uptr addr) {
  addr = MemToShadow(addr);
  addr = RoundDownTo(addr, kDumpWidth);
  addr -= kDumpWidth * (kShadowLines / 2);
  return addr;
}

template <typename PrintTag>
static void PrintTagInfoAroundAddr(uptr addr, uptr num_rows,
                                   InternalScopedString &s,
                                   PrintTag print_tag) {
  uptr center_row_beg = RoundDownTo(addr, kDumpWidth);
  uptr beg_row = center_row_beg - kDumpWidth * (num_rows / 2);
  uptr end_row = center_row_beg + kDumpWidth * ((num_rows + 1) / 2);
  for (uptr row = beg_row; row < end_row; row += kDumpWidth) {
    s.Append(row == center_row_beg ? "=>" : "  ");
    s.AppendF("%p:", (void *)ShadowToMem(row));
    for (uptr i = 0; i < kDumpWidth; i++) {
      s.Append(row + i == addr ? "[" : " ");
      print_tag(s, row + i);
      s.Append(row + i == addr ? "]" : " ");
    }
    s.Append("\n");
  }
}

template <typename GetTag, typename GetShortTag>
static void PrintTagsAroundAddr(uptr addr, GetTag get_tag,
                                GetShortTag get_short_tag) {
  InternalScopedString s;
  addr = MemToShadow(addr);
  s.AppendF(
      "\nMemory tags around the buggy address (one tag corresponds to %zd "
      "bytes):\n",
      kShadowAlignment);
  PrintTagInfoAroundAddr(addr, kShadowLines, s,
                         [&](InternalScopedString &s, uptr tag_addr) {
                           tag_t tag = get_tag(tag_addr);
                           s.AppendF("%02x", tag);
                         });

  s.AppendF(
      "Tags for short granules around the buggy address (one tag corresponds "
      "to %zd bytes):\n",
      kShadowAlignment);
  PrintTagInfoAroundAddr(addr, kShortLines, s,
                         [&](InternalScopedString &s, uptr tag_addr) {
                           tag_t tag = get_tag(tag_addr);
                           if (tag >= 1 && tag <= kShadowAlignment) {
                             tag_t short_tag = get_short_tag(tag_addr);
                             s.AppendF("%02x", short_tag);
                           } else {
                             s.Append("..");
                           }
                         });
  s.Append(
      "See "
      "https://clang.llvm.org/docs/"
      "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
      "description of short granule tags\n");
  Printf("%s", s.data());
}

static uptr GetTopPc(const StackTrace *stack) {
  return stack->size ? StackTrace::GetPreviousInstructionPc(stack->trace[0])
                     : 0;
}

namespace {
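// Base class for all report types. The constructor runs with the ScopedReport
// already active and eagerly snapshots everything the destructors will print
// (shadow tags, the surrounding heap chunk, stack/heap ring buffer entries,
// and a buffer overflow candidate), so printing does not have to read memory
// that other threads may be mutating or re-tagging in the meantime.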
class BaseReport {
 public:
  BaseReport(StackTrace *stack, bool fatal, uptr tagged_addr, uptr access_size)
      : scoped_report(fatal),
        stack(stack),
        tagged_addr(tagged_addr),
        access_size(access_size),
        untagged_addr(UntagAddr(tagged_addr)),
        ptr_tag(GetTagFromPointer(tagged_addr)),
        mismatch_offset(FindMismatchOffset()),
        heap(CopyHeapChunk()),
        allocations(CopyAllocations()),
        candidate(FindBufferOverflowCandidate()),
        shadow(CopyShadow()) {}

 protected:
  struct OverflowCandidate {
    uptr untagged_addr = 0;
    bool after = false;
    bool is_close = false;

    struct {
      uptr begin = 0;
      uptr end = 0;
      u32 thread_id = 0;
      u32 stack_id = 0;
      bool is_allocated = false;
    } heap;
  };

  struct HeapAllocation {
    HeapAllocationRecord har = {};
    uptr ring_index = 0;
    uptr num_matching_addrs = 0;
    uptr num_matching_addrs_4b = 0;
    u32 free_thread_id = 0;
  };

  struct Allocations {
    ArrayRef<SavedStackAllocations> stack;
    ArrayRef<HeapAllocation> heap;
  };

  struct HeapChunk {
    uptr begin = 0;
    uptr size = 0;
    u32 stack_id = 0;
    bool from_small_heap = false;
    bool is_allocated = false;
  };

  struct Shadow {
    uptr addr = 0;
    tag_t tags[kShadowDumpSize] = {};
    tag_t short_tags[kShortDumpSize] = {};
  };

  sptr FindMismatchOffset() const;
  Shadow CopyShadow() const;
  tag_t GetTagCopy(uptr addr) const;
  tag_t GetShortTagCopy(uptr addr) const;
  HeapChunk CopyHeapChunk() const;
  Allocations CopyAllocations();
  OverflowCandidate FindBufferOverflowCandidate() const;
  void PrintAddressDescription() const;
  void PrintHeapOrGlobalCandidate() const;
  void PrintTags(uptr addr) const;

  SavedStackAllocations stack_allocations_storage[16];
  HeapAllocation heap_allocations_storage[256];

  const ScopedReport scoped_report;
  const StackTrace *stack = nullptr;
  const uptr tagged_addr = 0;
  const uptr access_size = 0;
  const uptr untagged_addr = 0;
  const tag_t ptr_tag = 0;
  const sptr mismatch_offset = 0;

  const HeapChunk heap;
  const Allocations allocations;
  const OverflowCandidate candidate;

  const Shadow shadow;
};

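// Returns the offset within the access of the first byte whose tag actually
// mismatches. __hwasan_test_shadow() gives the first granule with a wrong
// memory tag; when that granule is a short granule whose inline tag matches
// ptr_tag, the first genuinely bad byte is the one just past the granule's
// valid prefix, so the offset is advanced accordingly.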
sptr BaseReport::FindMismatchOffset() const {
  if (!access_size)
    return 0;
  sptr offset =
      __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
  CHECK_GE(offset, 0);
  CHECK_LT(offset, static_cast<sptr>(access_size));
  tag_t *tag_ptr =
      reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
  tag_t mem_tag = *tag_ptr;

  if (mem_tag && mem_tag < kShadowAlignment) {
    tag_t *granule_ptr = reinterpret_cast<tag_t *>((untagged_addr + offset) &
                                                   ~(kShadowAlignment - 1));
    // If offset is 0, (untagged_addr + offset) may not be granule-aligned;
    // this is the offset of the leftmost accessed byte within the bad granule.
    u8 in_granule_offset = (untagged_addr + offset) & (kShadowAlignment - 1);
    tag_t short_tag = granule_ptr[kShadowAlignment - 1];
    // The first mismatch was a short granule that matched the ptr_tag.
    if (short_tag == ptr_tag) {
      // If the access starts after the end of the short granule, the first
      // bad byte is the first byte of the access; otherwise it is the first
      // byte past the end of the short granule.
      if (mem_tag > in_granule_offset) {
        offset += mem_tag - in_granule_offset;
      }
    }
  }
  return offset;
}

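// Takes a snapshot of the shadow (and, for short granules, the inline tags
// stored in the last byte of each granule) around the faulting address. The
// copy is taken up front so the printed dump stays consistent even if the
// application keeps running and re-tagging memory while the report is built.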
BaseReport::Shadow BaseReport::CopyShadow() const {
  Shadow result;
  if (!MemIsApp(untagged_addr))
    return result;

  result.addr = GetPrintTagStart(untagged_addr + mismatch_offset);
  uptr tag_addr = result.addr;
  uptr short_end = kShortDumpOffset + ARRAY_SIZE(shadow.short_tags);
  for (uptr i = 0; i < ARRAY_SIZE(result.tags); ++i, ++tag_addr) {
    if (!MemIsShadow(tag_addr))
      continue;
    result.tags[i] = *reinterpret_cast<tag_t *>(tag_addr);
    if (i < kShortDumpOffset || i >= short_end)
      continue;
    uptr granule_addr = ShadowToMem(tag_addr);
    if (1 <= result.tags[i] && result.tags[i] <= kShadowAlignment &&
        IsAccessibleMemoryRange(granule_addr, kShadowAlignment)) {
      result.short_tags[i - kShortDumpOffset] =
          *reinterpret_cast<tag_t *>(granule_addr + kShadowAlignment - 1);
    }
  }
  return result;
}

tag_t BaseReport::GetTagCopy(uptr addr) const {
  CHECK_GE(addr, shadow.addr);
  uptr idx = addr - shadow.addr;
  CHECK_LT(idx, ARRAY_SIZE(shadow.tags));
  return shadow.tags[idx];
}

tag_t BaseReport::GetShortTagCopy(uptr addr) const {
  CHECK_GE(addr, shadow.addr + kShortDumpOffset);
  uptr idx = addr - shadow.addr - kShortDumpOffset;
  CHECK_LT(idx, ARRAY_SIZE(shadow.short_tags));
  return shadow.short_tags[idx];
}

BaseReport::HeapChunk BaseReport::CopyHeapChunk() const {
  HeapChunk result = {};
  if (MemIsShadow(untagged_addr))
    return result;
  HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
  result.begin = chunk.Beg();
  if (result.begin) {
    result.size = chunk.ActualSize();
    result.from_small_heap = chunk.FromSmallHeap();
    result.is_allocated = chunk.IsAllocated();
    result.stack_id = chunk.GetAllocStackId();
  }
  return result;
}

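// Visits every live thread once, capturing (into the fixed-size storage
// arrays) the stack ring buffers of threads whose stack contains the address,
// and any heap deallocation records that cover it; the latter are the
// use-after-free candidates printed later.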
BaseReport::Allocations BaseReport::CopyAllocations() {
  if (MemIsShadow(untagged_addr))
    return {};
  uptr stack_allocations_count = 0;
  uptr heap_allocations_count = 0;
  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
    if (stack_allocations_count < ARRAY_SIZE(stack_allocations_storage) &&
        t->AddrIsInStack(untagged_addr)) {
      stack_allocations_storage[stack_allocations_count++].CopyFrom(t);
    }

    if (heap_allocations_count < ARRAY_SIZE(heap_allocations_storage)) {
      // Scan all threads' ring buffers to find if it's a heap-use-after-free.
      HeapAllocationRecord har;
      uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
      if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
                             &ring_index, &num_matching_addrs,
                             &num_matching_addrs_4b)) {
        auto &ha = heap_allocations_storage[heap_allocations_count++];
        ha.har = har;
        ha.ring_index = ring_index;
        ha.num_matching_addrs = num_matching_addrs;
        ha.num_matching_addrs_4b = num_matching_addrs_4b;
        ha.free_thread_id = t->unique_id();
      }
    }
  });

  return {{stack_allocations_storage, stack_allocations_count},
          {heap_allocations_storage, heap_allocations_count}};
}

BaseReport::OverflowCandidate BaseReport::FindBufferOverflowCandidate() const {
  OverflowCandidate result = {};
  if (MemIsShadow(untagged_addr))
    return result;
  // Check if this looks like a heap buffer overflow by scanning
  // the shadow left and right and looking for the first adjacent
  // object with a different memory tag. If that tag matches ptr_tag,
  // check the allocator if it has a live chunk there.
  tag_t *tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
  tag_t *candidate_tag_ptr = nullptr, *left = tag_ptr, *right = tag_ptr;
  uptr candidate_distance = 0;
  for (; candidate_distance < 1000; candidate_distance++) {
    if (MemIsShadow(reinterpret_cast<uptr>(left)) && TagsEqual(ptr_tag, left)) {
      candidate_tag_ptr = left;
      break;
    }
    --left;
    if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
        TagsEqual(ptr_tag, right)) {
      candidate_tag_ptr = right;
      break;
    }
    ++right;
  }

  constexpr auto kCloseCandidateDistance = 1;
  result.is_close = candidate_distance <= kCloseCandidateDistance;

  result.after = candidate_tag_ptr == left;
  result.untagged_addr = ShadowToMem(reinterpret_cast<uptr>(candidate_tag_ptr));
  HwasanChunkView chunk = FindHeapChunkByAddress(result.untagged_addr);
  if (chunk.IsAllocated()) {
    result.heap.is_allocated = true;
    result.heap.begin = chunk.Beg();
    result.heap.end = chunk.End();
    result.heap.thread_id = chunk.GetAllocThreadId();
    result.heap.stack_id = chunk.GetAllocStackId();
  }
  return result;
}

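// Prints the overflow candidate found above: if it is a live heap chunk, a
// heap-buffer-overflow description with the allocation stack; otherwise, if
// the address falls within a loaded module, a global-overflow description
// based on symbolization or, failing that, on the HWASan global descriptors.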
void BaseReport::PrintHeapOrGlobalCandidate() const {
  Decorator d;
  if (candidate.heap.is_allocated) {
    uptr offset;
    const char *whence;
    if (candidate.heap.begin <= untagged_addr &&
        untagged_addr < candidate.heap.end) {
      offset = untagged_addr - candidate.heap.begin;
      whence = "inside";
    } else if (candidate.after) {
      offset = untagged_addr - candidate.heap.end;
      whence = "after";
    } else {
      offset = candidate.heap.begin - untagged_addr;
      whence = "before";
    }
    Printf("%s", d.Error());
    Printf("\nCause: heap-buffer-overflow\n");
    Printf("%s", d.Default());
    Printf("%s", d.Location());
    Printf("%p is located %zd bytes %s a %zd-byte region [%p,%p)\n",
           (void*)untagged_addr, offset, whence,
           candidate.heap.end - candidate.heap.begin,
           (void*)candidate.heap.begin, (void*)candidate.heap.end);
    Printf("%s", d.Allocation());
    Printf("allocated by thread T%u here:\n", candidate.heap.thread_id);
    Printf("%s", d.Default());
    GetStackTraceFromId(candidate.heap.stack_id).Print();
    return;
  }
  // Check whether the address points into a loaded library. If so, this is
  // most likely a global variable.
  const char *module_name;
  uptr module_address;
  Symbolizer *sym = Symbolizer::GetOrInit();
  if (sym->GetModuleNameAndOffsetForPC(candidate.untagged_addr, &module_name,
                                       &module_address)) {
    Printf("%s", d.Error());
    Printf("\nCause: global-overflow\n");
    Printf("%s", d.Default());
    DataInfo info;
    Printf("%s", d.Location());
    if (sym->SymbolizeData(candidate.untagged_addr, &info) && info.start) {
      Printf(
          "%p is located %zd bytes %s a %zd-byte global variable "
          "%s [%p,%p) in %s\n",
          (void *)untagged_addr,
          candidate.after ? untagged_addr - (info.start + info.size)
                          : info.start - untagged_addr,
          candidate.after ? "after" : "before", info.size, info.name,
          (void *)info.start, (void *)(info.start + info.size), module_name);
    } else {
      uptr size = GetGlobalSizeFromDescriptor(candidate.untagged_addr);
      if (size == 0)
        // We couldn't find the size of the global from the descriptors.
        Printf(
            "%p is located %s a global variable in "
            "\n    #0 0x%x (%s+0x%x)\n",
            (void*)untagged_addr, candidate.after ? "after" : "before",
            (u32)candidate.untagged_addr, module_name, (u32)module_address);
      else
        Printf(
            "%p is located %s a %zd-byte global variable in "
            "\n    #0 0x%x (%s+0x%x)\n",
            (void*)untagged_addr, candidate.after ? "after" : "before", size,
            (u32)candidate.untagged_addr, module_name, (u32)module_address);
    }
    Printf("%s", d.Default());
  }
}

void BaseReport::PrintAddressDescription() const {
  Decorator d;
  int num_descriptions_printed = 0;

  if (MemIsShadow(untagged_addr)) {
    Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(),
           (void *)untagged_addr, d.Default());
    return;
  }

  // Print some very basic information about the address, if it points into a
  // heap chunk.
  if (heap.begin) {
    Printf(
        "%s[%p,%p) is a %s %s heap chunk; "
        "size: %zd offset: %zd\n%s",
        d.Location(), (void *)heap.begin, (void *)(heap.begin + heap.size),
        heap.from_small_heap ? "small" : "large",
        heap.is_allocated ? "allocated" : "unallocated", heap.size,
        untagged_addr - heap.begin, d.Default());
  }

  auto announce_by_id = [](u32 thread_id) {
    hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
      if (thread_id == t->unique_id())
        t->Announce();
    });
  };

  // Check the stack first. If the address is on the stack of a live thread,
  // we know it cannot be a heap / global overflow.
  for (const auto &sa : allocations.stack) {
    Printf("%s", d.Error());
    Printf("\nCause: stack tag-mismatch\n");
    Printf("%s", d.Location());
    Printf("Address %p is located in stack of thread T%zd\n",
           (void *)untagged_addr, (ssize)sa.thread_id());
    Printf("%s", d.Default());
    announce_by_id(sa.thread_id());
    PrintStackAllocations(sa.get(), ptr_tag, untagged_addr);
    num_descriptions_printed++;
  }

  if (allocations.stack.empty() && candidate.untagged_addr &&
      candidate.is_close) {
    PrintHeapOrGlobalCandidate();
    num_descriptions_printed++;
  }

  for (const auto &ha : allocations.heap) {
    const HeapAllocationRecord har = ha.har;

    Printf("%s", d.Error());
    Printf("\nCause: use-after-free\n");
    Printf("%s", d.Location());
    Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n",
           (void*)untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
           (ssize)har.requested_size, (void*)UntagAddr(har.tagged_addr),
           (void*)(UntagAddr(har.tagged_addr) + har.requested_size));
    Printf("%s", d.Allocation());
    Printf("freed by thread T%u here:\n", ha.free_thread_id);
    Printf("%s", d.Default());
    GetStackTraceFromId(har.free_context_id).Print();

    Printf("%s", d.Allocation());
    Printf("previously allocated by thread T%u here:\n", har.alloc_thread_id);
    Printf("%s", d.Default());
    GetStackTraceFromId(har.alloc_context_id).Print();

    // Print a developer note: the index of this heap object
    // in the thread's deallocation ring buffer.
    Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ha.ring_index + 1,
           (ssize)flags()->heap_history_size);
    Printf("hwasan_dev_note_num_matching_addrs: %zd\n", ha.num_matching_addrs);
    Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
           ha.num_matching_addrs_4b);

    announce_by_id(ha.free_thread_id);
    // TODO: announce_by_id(har.alloc_thread_id);
    num_descriptions_printed++;
  }

  if (candidate.untagged_addr && num_descriptions_printed == 0) {
    PrintHeapOrGlobalCandidate();
    num_descriptions_printed++;
  }

  // Print the remaining threads as extra information, one line per thread.
  if (flags()->print_live_threads_info) {
    Printf("\n");
    hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
  }

  if (!num_descriptions_printed)
    // We exhausted our possibilities. Bail out.
    Printf("HWAddressSanitizer cannot describe address in more detail.\n");
  if (num_descriptions_printed > 1) {
    Printf(
        "There are %d potential causes, printed above in order "
        "of likelihood.\n",
        num_descriptions_printed);
  }
}

void BaseReport::PrintTags(uptr addr) const {
  if (shadow.addr) {
    PrintTagsAroundAddr(
        addr, [&](uptr addr) { return GetTagCopy(addr); },
        [&](uptr addr) { return GetShortTagCopy(addr); });
  }
}

class InvalidFreeReport : public BaseReport {
 public:
  InvalidFreeReport(StackTrace *stack, uptr tagged_addr)
      : BaseReport(stack, flags()->halt_on_error, tagged_addr, 0) {}
  ~InvalidFreeReport();

 private:
};

InvalidFreeReport::~InvalidFreeReport() {
  Decorator d;
  Printf("%s", d.Error());
  uptr pc = GetTopPc(stack);
  const char *bug_type = "invalid-free";
  const Thread *thread = GetCurrentThread();
  if (thread) {
    Report("ERROR: %s: %s on address %p at pc %p on thread T%zd\n",
           SanitizerToolName, bug_type, (void *)untagged_addr, (void *)pc,
           (ssize)thread->unique_id());
  } else {
    Report("ERROR: %s: %s on address %p at pc %p on unknown thread\n",
           SanitizerToolName, bug_type, (void *)untagged_addr, (void *)pc);
  }
  Printf("%s", d.Access());
  if (shadow.addr) {
    Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag,
           GetTagCopy(MemToShadow(untagged_addr)));
  }
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription();
  PrintTags(untagged_addr);
  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}

class TailOverwrittenReport : public BaseReport {
 public:
  explicit TailOverwrittenReport(StackTrace *stack, uptr tagged_addr,
                                 uptr orig_size, const u8 *expected)
      : BaseReport(stack, flags()->halt_on_error, tagged_addr, 0),
        orig_size(orig_size),
        tail_size(kShadowAlignment - (orig_size % kShadowAlignment)) {
    CHECK_GT(tail_size, 0U);
    CHECK_LT(tail_size, kShadowAlignment);
    internal_memcpy(tail_copy,
                    reinterpret_cast<u8 *>(untagged_addr + orig_size),
                    tail_size);
    internal_memcpy(actual_expected, expected, tail_size);
    // The short granule tag is stashed in the last byte of the magic string.
    // To avoid confusion, make the expected magic string contain the short
    // granule tag.
    if (orig_size % kShadowAlignment != 0)
      actual_expected[tail_size - 1] = ptr_tag;
  }
  ~TailOverwrittenReport();

 private:
  const uptr orig_size = 0;
  const uptr tail_size = 0;
  u8 actual_expected[kShadowAlignment] = {};
  u8 tail_copy[kShadowAlignment] = {};
};

TailOverwrittenReport::~TailOverwrittenReport() {
  Decorator d;
  Printf("%s", d.Error());
  const char *bug_type = "allocation-tail-overwritten";
  Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
         bug_type, (void *)untagged_addr, (void *)(untagged_addr + orig_size),
         orig_size);
  Printf("\n%s", d.Default());
  Printf(
      "Stack of invalid access unknown. Issue detected at deallocation "
      "time.\n");
  Printf("%s", d.Allocation());
  Printf("deallocated here:\n");
  Printf("%s", d.Default());
  stack->Print();
  if (heap.begin) {
    Printf("%s", d.Allocation());
    Printf("allocated here:\n");
    Printf("%s", d.Default());
    GetStackTraceFromId(heap.stack_id).Print();
  }

  InternalScopedString s;
  u8 *tail = tail_copy;
  s.Append("Tail contains: ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.Append(".. ");
  for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", tail[i]);
  s.Append("\n");
  s.Append("Expected:      ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.Append(".. ");
  for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", actual_expected[i]);
  s.Append("\n");
  s.Append("               ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.Append("   ");
  for (uptr i = 0; i < tail_size; i++)
    s.AppendF("%s ", actual_expected[i] != tail[i] ? "^^" : "  ");

  s.AppendF(
      "\nThis error occurs when a buffer overflow overwrites memory\n"
      "after a heap object, but within the %zd-byte granule, e.g.\n"
      "   char *x = new char[20];\n"
      "   x[25] = 42;\n"
      "%s does not detect such bugs in uninstrumented code at the time of "
      "write,"
      "\nbut can detect them at the time of free/delete.\n"
      "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
      kShadowAlignment, SanitizerToolName);
  Printf("%s", s.data());
  GetCurrentThread()->Announce();
  PrintTags(untagged_addr);
  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}

class TagMismatchReport : public BaseReport {
 public:
  explicit TagMismatchReport(StackTrace *stack, uptr tagged_addr,
                             uptr access_size, bool is_store, bool fatal,
                             uptr *registers_frame)
      : BaseReport(stack, fatal, tagged_addr, access_size),
        is_store(is_store),
        registers_frame(registers_frame) {}
  ~TagMismatchReport();

 private:
  const bool is_store;
  const uptr *registers_frame;
};

TagMismatchReport::~TagMismatchReport() {
  Decorator d;
  // TODO: when possible, try to print heap-use-after-free, etc.
  const char *bug_type = "tag-mismatch";
  uptr pc = GetTopPc(stack);
  Printf("%s", d.Error());
  Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
         (void *)untagged_addr, (void *)pc);

  Thread *t = GetCurrentThread();

  tag_t mem_tag = GetTagCopy(MemToShadow(untagged_addr + mismatch_offset));

  Printf("%s", d.Access());
  if (mem_tag && mem_tag < kShadowAlignment) {
    tag_t short_tag =
        GetShortTagCopy(MemToShadow(untagged_addr + mismatch_offset));
    Printf(
        "%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n",
        is_store ? "WRITE" : "READ", access_size, (void *)untagged_addr,
        ptr_tag, mem_tag, short_tag, (ssize)t->unique_id());
  } else {
    Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
           is_store ? "WRITE" : "READ", access_size, (void *)untagged_addr,
           ptr_tag, mem_tag, (ssize)t->unique_id());
  }
  if (mismatch_offset)
    Printf("Invalid access starting at offset %zu\n", mismatch_offset);
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription();
  t->Announce();

  PrintTags(untagged_addr + mismatch_offset);

  if (registers_frame)
    ReportRegisters(registers_frame, pc);

  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}
} // namespace

void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
  InvalidFreeReport R(stack, tagged_addr);
}

void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
                           const u8 *expected) {
  TailOverwrittenReport R(stack, tagged_addr, orig_size, expected);
}

void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
                       bool is_store, bool fatal, uptr *registers_frame) {
  TagMismatchReport R(stack, tagged_addr, access_size, is_store, fatal,
                      registers_frame);
}

// See the frame breakdown defined in __hwasan_tag_mismatch (from
// hwasan_tag_mismatch_{aarch64,riscv64}.S).
void ReportRegisters(const uptr *frame, uptr pc) {
  Printf("\nRegisters where the failure occurred (pc %p):\n", (void *)pc);

  // We explicitly print a single line (4 registers/line) each iteration to
  // reduce the amount of logcat error messages printed. Each Printf() will
  // result in a new logcat line, irrespective of whether a newline is present,
  // and so we wish to reduce the number of Printf() calls we have to make.
#if defined(__aarch64__)
  Printf("    x0 %016llx  x1 %016llx  x2 %016llx  x3 %016llx\n",
         frame[0], frame[1], frame[2], frame[3]);
#elif SANITIZER_RISCV64
  Printf("    sp %016llx  x1 %016llx  x2 %016llx  x3 %016llx\n",
         reinterpret_cast<const u8 *>(frame) + 256, frame[1], frame[2],
         frame[3]);
#endif
  Printf("    x4 %016llx  x5 %016llx  x6 %016llx  x7 %016llx\n",
         frame[4], frame[5], frame[6], frame[7]);
  Printf("    x8 %016llx  x9 %016llx x10 %016llx x11 %016llx\n",
         frame[8], frame[9], frame[10], frame[11]);
  Printf("   x12 %016llx x13 %016llx x14 %016llx x15 %016llx\n",
         frame[12], frame[13], frame[14], frame[15]);
  Printf("   x16 %016llx x17 %016llx x18 %016llx x19 %016llx\n",
         frame[16], frame[17], frame[18], frame[19]);
  Printf("   x20 %016llx x21 %016llx x22 %016llx x23 %016llx\n",
         frame[20], frame[21], frame[22], frame[23]);
  Printf("   x24 %016llx x25 %016llx x26 %016llx x27 %016llx\n",
         frame[24], frame[25], frame[26], frame[27]);
  // hwasan_check* reduces the stack pointer by 256, then __hwasan_tag_mismatch
  // passes it to this function.
#if defined(__aarch64__)
  Printf("   x28 %016llx x29 %016llx x30 %016llx  sp %016llx\n", frame[28],
         frame[29], frame[30], reinterpret_cast<const u8 *>(frame) + 256);
#elif SANITIZER_RISCV64
  Printf("   x28 %016llx x29 %016llx x30 %016llx x31 %016llx\n", frame[28],
         frame[29], frame[30], frame[31]);
#endif
}

} // namespace __hwasan

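// Registers a callback that receives the full text of each HWASan report. The
// callback is invoked from ~ScopedReport once the report text is complete,
// and before the process dies on fatal errors. A minimal usage sketch (the
// callback name is illustrative, not part of this file):
//
//   static void SaveHwasanReport(const char *report) {
//     // Forward the report text to a crash-logging service, a file, etc.
//   }
//   // Early in process startup:
//   __hwasan_set_error_report_callback(SaveHwasanReport);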
void __hwasan_set_error_report_callback(void (*callback)(const char *)) {
  __hwasan::ScopedReport::SetErrorReportCallback(callback);
}